2024-12-10 14:25:13,001 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9
2024-12-10 14:25:13,015 main DEBUG Took 0.012084 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-10 14:25:13,016 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-10 14:25:13,016 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-10 14:25:13,017 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-10 14:25:13,018 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 14:25:13,025 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-10 14:25:13,036 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 14:25:13,037 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 14:25:13,038 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 14:25:13,038 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 14:25:13,039 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 14:25:13,039 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 14:25:13,040 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 14:25:13,040 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 14:25:13,041 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 14:25:13,041 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 14:25:13,042 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 14:25:13,042 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 14:25:13,042 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 14:25:13,042 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 14:25:13,043 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 14:25:13,043 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 14:25:13,044 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 14:25:13,044 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 14:25:13,044 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 14:25:13,044 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 14:25:13,045 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 14:25:13,045 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 14:25:13,045 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 14:25:13,046 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-10 14:25:13,046 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 14:25:13,046 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-10 14:25:13,048 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-10 14:25:13,049 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-10 14:25:13,050 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-10 14:25:13,051 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-10 14:25:13,052 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-10 14:25:13,052 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-10 14:25:13,060 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-10 14:25:13,062 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-10 14:25:13,064 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-10 14:25:13,064 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-10 14:25:13,065 main DEBUG createAppenders(={Console})
2024-12-10 14:25:13,065 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized
2024-12-10 14:25:13,066 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9
2024-12-10 14:25:13,066 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK.
2024-12-10 14:25:13,066 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-10 14:25:13,066 main DEBUG OutputStream closed
2024-12-10 14:25:13,067 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-10 14:25:13,067 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-10 14:25:13,067 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK
2024-12-10 14:25:13,137 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-10 14:25:13,140 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-10 14:25:13,142 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-10 14:25:13,143 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-10 14:25:13,144 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-10 14:25:13,144 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-10 14:25:13,145 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-10 14:25:13,145 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-10 14:25:13,146 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-10 14:25:13,146 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-10 14:25:13,147 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-10 14:25:13,147 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-10 14:25:13,148 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-10 14:25:13,148 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-10 14:25:13,148 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-10 14:25:13,149 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-10 14:25:13,149 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-10 14:25:13,150 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-10 14:25:13,154 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-10 14:25:13,154 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null
2024-12-10 14:25:13,155 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-10 14:25:13,156 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK.
2024-12-10T14:25:13,383 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49
2024-12-10 14:25:13,386 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-10 14:25:13,386 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-10T14:25:13,395 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins
2024-12-10T14:25:13,418 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-10T14:25:13,421 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/cluster_f4b4a87d-946f-3b61-084c-969bf580dcf8, deleteOnExit=true
2024-12-10T14:25:13,422 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-12-10T14:25:13,423 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/test.cache.data in system properties and HBase conf
2024-12-10T14:25:13,424 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/hadoop.tmp.dir in system properties and HBase conf
2024-12-10T14:25:13,424 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/hadoop.log.dir in system properties and HBase conf
2024-12-10T14:25:13,425 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-10T14:25:13,426 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-10T14:25:13,426 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-12-10T14:25:13,524 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-10T14:25:13,619 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-10T14:25:13,623 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-10T14:25:13,623 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-10T14:25:13,624 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-10T14:25:13,624 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-10T14:25:13,625 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-10T14:25:13,625 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-10T14:25:13,625 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-10T14:25:13,626 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-10T14:25:13,626 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-10T14:25:13,626 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/nfs.dump.dir in system properties and HBase conf
2024-12-10T14:25:13,627 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/java.io.tmpdir in system properties and HBase conf
2024-12-10T14:25:13,627 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-10T14:25:13,627 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-10T14:25:13,628 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-10T14:25:14,443 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-10T14:25:14,538 INFO [Time-limited test {}] log.Log(170): Logging initialized @2235ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-10T14:25:14,628 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-10T14:25:14,693 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-10T14:25:14,714 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-10T14:25:14,714 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-10T14:25:14,715 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-10T14:25:14,729 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-10T14:25:14,731 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/hadoop.log.dir/,AVAILABLE}
2024-12-10T14:25:14,732 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-10T14:25:14,933 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/java.io.tmpdir/jetty-localhost-46475-hadoop-hdfs-3_4_1-tests_jar-_-any-12093577779311340520/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-10T14:25:14,940 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:46475}
2024-12-10T14:25:14,940 INFO [Time-limited test {}] server.Server(415): Started @2638ms
2024-12-10T14:25:15,329 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-10T14:25:15,337 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-10T14:25:15,338 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-10T14:25:15,338 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-10T14:25:15,339 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-10T14:25:15,339 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ca71a25{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/hadoop.log.dir/,AVAILABLE}
2024-12-10T14:25:15,340 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-10T14:25:15,460 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3054265c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/java.io.tmpdir/jetty-localhost-40639-hadoop-hdfs-3_4_1-tests_jar-_-any-3151994716073176772/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T14:25:15,461 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@65902fec{HTTP/1.1, (http/1.1)}{localhost:40639}
2024-12-10T14:25:15,461 INFO [Time-limited test {}] server.Server(415): Started @3159ms
2024-12-10T14:25:15,516 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-10T14:25:16,006 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/cluster_f4b4a87d-946f-3b61-084c-969bf580dcf8/dfs/data/data2/current/BP-1903503158-172.17.0.2-1733840714192/current, will proceed with Du for space computation calculation,
2024-12-10T14:25:16,006 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/cluster_f4b4a87d-946f-3b61-084c-969bf580dcf8/dfs/data/data1/current/BP-1903503158-172.17.0.2-1733840714192/current, will proceed with Du for space computation calculation,
2024-12-10T14:25:16,045 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-10T14:25:16,103 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2ac0224c64ac7ea4 with lease ID 0x80bdc51df355782e: Processing first storage report for DS-86872b83-c5b1-4604-9e37-35fb12cc34f2 from datanode DatanodeRegistration(127.0.0.1:41075, datanodeUuid=f005e3c9-5d83-41ba-9f33-c9bb2afc7f0c, infoPort=42405, infoSecurePort=0, ipcPort=44207, storageInfo=lv=-57;cid=testClusterID;nsid=1822246522;c=1733840714192)
2024-12-10T14:25:16,104 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2ac0224c64ac7ea4 with lease ID 0x80bdc51df355782e: from storage DS-86872b83-c5b1-4604-9e37-35fb12cc34f2 node DatanodeRegistration(127.0.0.1:41075, datanodeUuid=f005e3c9-5d83-41ba-9f33-c9bb2afc7f0c, infoPort=42405, infoSecurePort=0, ipcPort=44207, storageInfo=lv=-57;cid=testClusterID;nsid=1822246522;c=1733840714192), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-10T14:25:16,104 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2ac0224c64ac7ea4 with lease ID 0x80bdc51df355782e: Processing first storage report for DS-01a9765c-0009-4da0-a37b-3e577df9c3a7 from datanode DatanodeRegistration(127.0.0.1:41075, datanodeUuid=f005e3c9-5d83-41ba-9f33-c9bb2afc7f0c, infoPort=42405, infoSecurePort=0, ipcPort=44207, storageInfo=lv=-57;cid=testClusterID;nsid=1822246522;c=1733840714192)
2024-12-10T14:25:16,105 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2ac0224c64ac7ea4 with lease ID 0x80bdc51df355782e: from storage DS-01a9765c-0009-4da0-a37b-3e577df9c3a7 node DatanodeRegistration(127.0.0.1:41075, datanodeUuid=f005e3c9-5d83-41ba-9f33-c9bb2afc7f0c, infoPort=42405, infoSecurePort=0, ipcPort=44207, storageInfo=lv=-57;cid=testClusterID;nsid=1822246522;c=1733840714192), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-10T14:25:16,181 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49
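The block above is the HDFS side of the minicluster coming up for org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy: a namenode web UI on localhost:46475, a datanode web UI on localhost:40639, and the first block reports from 127.0.0.1:41075. A minimal sketch of the JUnit scaffolding that drives this kind of startup, using only class names that appear in the log (the class name MiniClusterStartupSketch and the empty test body are hypothetical, not taken from this run):

    import org.apache.hadoop.hbase.HBaseClassTestRule;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.ClassRule;

    public class MiniClusterStartupSketch {
      // HBaseClassTestRule enforces the per-class timeout reported above ("timeout: 13 mins").
      @ClassRule
      public static final HBaseClassTestRule CLASS_RULE =
          HBaseClassTestRule.forClass(MiniClusterStartupSketch.class);

      private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

      @BeforeClass
      public static void setUp() throws Exception {
        // Mirrors the StartMiniClusterOption logged by HBaseTestingUtility(1126):
        // 1 master, 1 region server, 1 data node, 1 ZooKeeper server.
        TEST_UTIL.startMiniCluster(StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(1)
            .numZkServers(1)
            .build());
      }

      @AfterClass
      public static void tearDown() throws Exception {
        TEST_UTIL.shutdownMiniCluster();
      }
    }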
2024-12-10T14:25:16,257 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/cluster_f4b4a87d-946f-3b61-084c-969bf580dcf8/zookeeper_0, clientPort=58494, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/cluster_f4b4a87d-946f-3b61-084c-969bf580dcf8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/cluster_f4b4a87d-946f-3b61-084c-969bf580dcf8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-10T14:25:16,266 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=58494
2024-12-10T14:25:16,280 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T14:25:16,282 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T14:25:16,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741825_1001 (size=7)
2024-12-10T14:25:16,917 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da with version=8
2024-12-10T14:25:16,917 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/hbase-staging
2024-12-10T14:25:17,052 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-10T14:25:17,315 INFO [Time-limited test {}] client.ConnectionUtils(129): master/db1d50717577:0 server-side Connection retries=45
2024-12-10T14:25:17,334 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T14:25:17,335 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-10T14:25:17,335 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-10T14:25:17,335 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T14:25:17,335 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-10T14:25:17,470 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-10T14:25:17,528 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-10T14:25:17,537 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-10T14:25:17,541 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-10T14:25:17,568 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 12074 (auto-detected)
2024-12-10T14:25:17,569 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-10T14:25:17,588 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33823
2024-12-10T14:25:17,596 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T14:25:17,598 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T14:25:17,610 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:33823 connecting to ZooKeeper ensemble=127.0.0.1:58494
2024-12-10T14:25:17,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:338230x0, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-10T14:25:17,645 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33823-0x1019cc3ac5f0000 connected
2024-12-10T14:25:17,675 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-10T14:25:17,678 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-10T14:25:17,681 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-10T14:25:17,684 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33823
2024-12-10T14:25:17,685 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33823
2024-12-10T14:25:17,688 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33823
2024-12-10T14:25:17,689 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33823
2024-12-10T14:25:17,689 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33823
2024-12-10T14:25:17,696 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da, hbase.cluster.distributed=false
2024-12-10T14:25:17,758 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/db1d50717577:0 server-side Connection retries=45
2024-12-10T14:25:17,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T14:25:17,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-10T14:25:17,758 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-10T14:25:17,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-10T14:25:17,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-10T14:25:17,761 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-10T14:25:17,763 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-10T14:25:17,764 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46699
2024-12-10T14:25:17,765 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-10T14:25:17,771 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-10T14:25:17,772 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T14:25:17,774 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T14:25:17,778 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:46699 connecting to ZooKeeper ensemble=127.0.0.1:58494
2024-12-10T14:25:17,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:466990x0, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-10T14:25:17,782 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46699-0x1019cc3ac5f0001 connected
2024-12-10T14:25:17,782 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-10T14:25:17,784 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-10T14:25:17,784 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-10T14:25:17,785 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46699
2024-12-10T14:25:17,786 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46699
2024-12-10T14:25:17,786 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46699
2024-12-10T14:25:17,787 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46699
2024-12-10T14:25:17,788 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46699
2024-12-10T14:25:17,790 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/db1d50717577,33823,1733840717045
2024-12-10T14:25:17,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-10T14:25:17,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-10T14:25:17,798 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db1d50717577,33823,1733840717045
2024-12-10T14:25:17,805 DEBUG [M:0;db1d50717577:33823 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db1d50717577:33823
2024-12-10T14:25:17,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-10T14:25:17,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-10T14:25:17,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-10T14:25:17,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-10T14:25:17,820 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-10T14:25:17,821 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-10T14:25:17,821 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db1d50717577,33823,1733840717045 from backup master directory
2024-12-10T14:25:17,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db1d50717577,33823,1733840717045
2024-12-10T14:25:17,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-10T14:25:17,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-10T14:25:17,825 WARN [master/db1d50717577:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-10T14:25:17,825 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db1d50717577,33823,1733840717045
2024-12-10T14:25:17,828 INFO [master/db1d50717577:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-10T14:25:17,829 INFO [master/db1d50717577:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-10T14:25:17,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741826_1002 (size=42)
2024-12-10T14:25:18,298 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/hbase.id with ID: 429bc220-7e8d-485e-9117-1a5f89e4239f
2024-12-10T14:25:18,338 INFO [master/db1d50717577:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-10T14:25:18,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-10T14:25:18,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-10T14:25:18,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741827_1003 (size=196)
2024-12-10T14:25:18,796 INFO [master/db1d50717577:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-10T14:25:18,798 INFO [master/db1d50717577:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-10T14:25:18,815 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T14:25:18,819 INFO [master/db1d50717577:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-12-10T14:25:18,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741828_1004 (size=1189)
2024-12-10T14:25:18,867 INFO [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store
2024-12-10T14:25:18,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741829_1005 (size=34)
2024-12-10T14:25:18,885 INFO [master/db1d50717577:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
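The StoreHotnessProtector warning above states its own remedy: the protector stays disabled until hbase.region.store.parallel.put.limit is set above zero. A sketch of that configuration change, applied before the cluster is started (the limit value 10 is an arbitrary example, not taken from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HotnessProtectorConfigSketch {
      public static Configuration withStoreHotnessProtector() {
        Configuration conf = HBaseConfiguration.create();
        // Any value > 0 enables StoreHotnessProtector; 10 is only an example value.
        conf.setInt("hbase.region.store.parallel.put.limit", 10);
        return conf;
      }
    }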
2024-12-10T14:25:18,886 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-10T14:25:18,887 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-10T14:25:18,887 INFO [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-10T14:25:18,887 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-10T14:25:18,887 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-10T14:25:18,887 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-10T14:25:18,887 INFO [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-10T14:25:18,887 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-10T14:25:18,889 WARN [master/db1d50717577:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/.initializing
2024-12-10T14:25:18,889 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/WALs/db1d50717577,33823,1733840717045
2024-12-10T14:25:18,895 INFO [master/db1d50717577:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-12-10T14:25:18,905 INFO [master/db1d50717577:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db1d50717577%2C33823%2C1733840717045, suffix=, logDir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/WALs/db1d50717577,33823,1733840717045, archiveDir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/oldWALs, maxLogs=10
2024-12-10T14:25:18,927 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/WALs/db1d50717577,33823,1733840717045/db1d50717577%2C33823%2C1733840717045.1733840718909, exclude list is [], retry=0
2024-12-10T14:25:18,944 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41075,DS-86872b83-c5b1-4604-9e37-35fb12cc34f2,DISK]
2024-12-10T14:25:18,947 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf.
2024-12-10T14:25:18,984 INFO [master/db1d50717577:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/WALs/db1d50717577,33823,1733840717045/db1d50717577%2C33823%2C1733840717045.1733840718909 2024-12-10T14:25:18,985 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42405:42405)] 2024-12-10T14:25:18,986 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:25:18,986 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:25:18,990 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:25:18,990 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:25:19,030 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:25:19,055 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-10T14:25:19,059 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:19,062 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T14:25:19,062 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:25:19,066 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-10T14:25:19,066 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:19,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:25:19,067 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:25:19,070 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-10T14:25:19,070 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:19,071 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:25:19,071 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:25:19,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-10T14:25:19,074 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:19,075 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:25:19,078 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:25:19,079 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:25:19,088 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T14:25:19,091 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T14:25:19,096 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:25:19,096 INFO [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74447979, jitterRate=0.10936133563518524}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T14:25:19,100 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-10T14:25:19,101 INFO [master/db1d50717577:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-10T14:25:19,130 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56652484, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:19,164 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
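The repeated `compactions.CompactionConfiguration(181)` lines show ratio 1.2 (5.0 off-peak) feeding ExploringCompactionPolicy. The snippet below is a simplified, self-contained illustration of that ratio rule, not the HBase implementation itself: a candidate set of store files passes when no file is larger than ratio times the combined size of the other files in the set.

```java
import java.util.List;

public class RatioCheck {
  // Returns true when every file is no larger than ratio * (sum of the other files).
  static boolean withinRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Hypothetical file sizes in bytes (12 MiB, 10 MiB, 9 MiB).
    List<Long> candidate = List.of(12L << 20, 10L << 20, 9L << 20);
    System.out.println(withinRatio(candidate, 1.2)); // peak-hours ratio from the log
    System.out.println(withinRatio(candidate, 5.0)); // off-peak ratio from the log
  }
}
```

With the larger off-peak ratio, bigger files stay eligible, which is why off-peak windows tend to admit larger compactions.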
2024-12-10T14:25:19,176 INFO [master/db1d50717577:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-10T14:25:19,176 INFO [master/db1d50717577:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-10T14:25:19,179 INFO [master/db1d50717577:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-10T14:25:19,180 INFO [master/db1d50717577:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-10T14:25:19,185 INFO [master/db1d50717577:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-12-10T14:25:19,185 INFO [master/db1d50717577:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-10T14:25:19,209 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-10T14:25:19,221 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-10T14:25:19,223 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-10T14:25:19,225 INFO [master/db1d50717577:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-10T14:25:19,226 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-10T14:25:19,228 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-10T14:25:19,230 INFO [master/db1d50717577:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-10T14:25:19,233 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-10T14:25:19,236 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-10T14:25:19,237 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-10T14:25:19,239 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-10T14:25:19,249 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-10T14:25:19,250 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-10T14:25:19,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T14:25:19,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T14:25:19,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:25:19,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:25:19,255 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=db1d50717577,33823,1733840717045, sessionid=0x1019cc3ac5f0000, setting cluster-up flag (Was=false) 2024-12-10T14:25:19,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:25:19,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:25:19,274 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-10T14:25:19,276 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db1d50717577,33823,1733840717045 2024-12-10T14:25:19,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:25:19,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:25:19,291 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-10T14:25:19,293 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db1d50717577,33823,1733840717045 2024-12-10T14:25:19,370 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-12-10T14:25:19,376 INFO [master/db1d50717577:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-10T14:25:19,378 INFO [master/db1d50717577:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-10T14:25:19,383 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db1d50717577,33823,1733840717045 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-10T14:25:19,387 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db1d50717577:0, corePoolSize=5, maxPoolSize=5 2024-12-10T14:25:19,387 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db1d50717577:0, corePoolSize=5, maxPoolSize=5 2024-12-10T14:25:19,387 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db1d50717577:0, corePoolSize=5, maxPoolSize=5 2024-12-10T14:25:19,387 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db1d50717577:0, corePoolSize=5, maxPoolSize=5 2024-12-10T14:25:19,387 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db1d50717577:0, corePoolSize=10, maxPoolSize=10 2024-12-10T14:25:19,388 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:25:19,388 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db1d50717577:0, corePoolSize=2, maxPoolSize=2 2024-12-10T14:25:19,388 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:25:19,390 INFO [master/db1d50717577:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733840749390 2024-12-10T14:25:19,391 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-10T14:25:19,392 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-10T14:25:19,392 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute 
pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-10T14:25:19,393 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-10T14:25:19,395 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-10T14:25:19,395 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-10T14:25:19,396 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-10T14:25:19,396 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-10T14:25:19,396 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T14:25:19,397 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:19,397 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T14:25:19,397 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-10T14:25:19,398 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-10T14:25:19,399 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-10T14:25:19,402 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-10T14:25:19,402 INFO [master/db1d50717577:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-10T14:25:19,404 DEBUG 
[master/db1d50717577:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db1d50717577:0:becomeActiveMaster-HFileCleaner.large.0-1733840719403,5,FailOnTimeoutGroup] 2024-12-10T14:25:19,406 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/db1d50717577:0:becomeActiveMaster-HFileCleaner.small.0-1733840719404,5,FailOnTimeoutGroup] 2024-12-10T14:25:19,406 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T14:25:19,407 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-10T14:25:19,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741831_1007 (size=1039) 2024-12-10T14:25:19,408 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-10T14:25:19,408 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-10T14:25:19,409 DEBUG [RS:0;db1d50717577:46699 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db1d50717577:46699 2024-12-10T14:25:19,411 INFO [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1008): ClusterId : 429bc220-7e8d-485e-9117-1a5f89e4239f 2024-12-10T14:25:19,413 DEBUG [RS:0;db1d50717577:46699 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T14:25:19,418 DEBUG [RS:0;db1d50717577:46699 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T14:25:19,418 DEBUG [RS:0;db1d50717577:46699 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T14:25:19,420 DEBUG [RS:0;db1d50717577:46699 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T14:25:19,421 DEBUG [RS:0;db1d50717577:46699 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4af71e53, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:19,422 DEBUG [RS:0;db1d50717577:46699 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a3847a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db1d50717577/172.17.0.2:0 2024-12-10T14:25:19,425 INFO [RS:0;db1d50717577:46699 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-10T14:25:19,425 INFO [RS:0;db1d50717577:46699 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-10T14:25:19,425 DEBUG [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1090): About to register with Master. 
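The `FSTableDescriptors(133)` record above prints the full hbase:meta descriptor, including an 'info' family with VERSIONS => '3', ROW_INDEX_V1 encoding, a ROWCOL Bloom filter, IN_MEMORY => 'true' and an 8 KB block size. As a rough client-side sketch (for an ordinary table named "example", which is hypothetical, not hbase:meta itself), those attributes map onto the HBase 2.x builder API like this:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static TableDescriptor build() {
    // Attribute values mirror the 'info' family of the descriptor printed in the log above.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build())
        .build();
  }
}
```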
2024-12-10T14:25:19,427 INFO [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(3073): reportForDuty to master=db1d50717577,33823,1733840717045 with isa=db1d50717577/172.17.0.2:46699, startcode=1733840717757 2024-12-10T14:25:19,439 DEBUG [RS:0;db1d50717577:46699 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T14:25:19,472 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36865, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T14:25:19,479 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33823 {}] master.ServerManager(332): Checking decommissioned status of RegionServer db1d50717577,46699,1733840717757 2024-12-10T14:25:19,482 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33823 {}] master.ServerManager(486): Registering regionserver=db1d50717577,46699,1733840717757 2024-12-10T14:25:19,499 DEBUG [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da 2024-12-10T14:25:19,499 DEBUG [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:38801 2024-12-10T14:25:19,499 DEBUG [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-10T14:25:19,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T14:25:19,505 DEBUG [RS:0;db1d50717577:46699 {}] zookeeper.ZKUtil(111): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db1d50717577,46699,1733840717757 2024-12-10T14:25:19,506 WARN [RS:0;db1d50717577:46699 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
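The `ZKUtil(111)` line above shows the region server registering an ephemeral znode under /hbase/rs, which RegionServerTracker then picks up on the master. A minimal stand-alone sketch of inspecting that znode with the plain ZooKeeper client follows; the quorum address is copied from the log, and the rest is an assumption for illustration.

```java
import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ListRegionServers {
  public static void main(String[] args) throws Exception {
    // Quorum taken from the log above; adjust for a real cluster.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58494", 30_000, event -> { });
    try {
      // Each live region server keeps an ephemeral child under /hbase/rs.
      List<String> servers = zk.getChildren("/hbase/rs", false);
      servers.forEach(System.out::println); // e.g. host,port,startcode
    } finally {
      zk.close();
    }
  }
}
```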
2024-12-10T14:25:19,506 INFO [RS:0;db1d50717577:46699 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T14:25:19,506 DEBUG [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/WALs/db1d50717577,46699,1733840717757 2024-12-10T14:25:19,508 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db1d50717577,46699,1733840717757] 2024-12-10T14:25:19,519 DEBUG [RS:0;db1d50717577:46699 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-10T14:25:19,531 INFO [RS:0;db1d50717577:46699 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T14:25:19,545 INFO [RS:0;db1d50717577:46699 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T14:25:19,548 INFO [RS:0;db1d50717577:46699 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T14:25:19,548 INFO [RS:0;db1d50717577:46699 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T14:25:19,549 INFO [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-10T14:25:19,556 INFO [RS:0;db1d50717577:46699 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
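The `MemStoreFlusher(130)` line reports globalMemStoreLimit=880 M with a low mark of 836 M. The small calculation below reproduces that pair under the assumption that the low mark is the global limit scaled by a lower-limit fraction of 0.95 (commonly `hbase.regionserver.global.memstore.size.lower.limit`); treat the key name and default as assumptions rather than something this log states.

```java
public class MemStoreLimits {
  public static void main(String[] args) {
    long globalLimit = 880L * 1024 * 1024;   // 880 M, as logged
    double lowerLimitFraction = 0.95;        // assumed default lower-limit fraction
    long lowMark = (long) (globalLimit * lowerLimitFraction);
    System.out.printf("globalMemStoreLimit=%d, lowMark=%d (~836 M)%n", globalLimit, lowMark);
  }
}
```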
2024-12-10T14:25:19,557 DEBUG [RS:0;db1d50717577:46699 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:25:19,557 DEBUG [RS:0;db1d50717577:46699 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:25:19,557 DEBUG [RS:0;db1d50717577:46699 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:25:19,557 DEBUG [RS:0;db1d50717577:46699 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:25:19,558 DEBUG [RS:0;db1d50717577:46699 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:25:19,558 DEBUG [RS:0;db1d50717577:46699 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db1d50717577:0, corePoolSize=2, maxPoolSize=2 2024-12-10T14:25:19,558 DEBUG [RS:0;db1d50717577:46699 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:25:19,558 DEBUG [RS:0;db1d50717577:46699 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:25:19,558 DEBUG [RS:0;db1d50717577:46699 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:25:19,559 DEBUG [RS:0;db1d50717577:46699 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:25:19,559 DEBUG [RS:0;db1d50717577:46699 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db1d50717577:0, corePoolSize=1, maxPoolSize=1 2024-12-10T14:25:19,559 DEBUG [RS:0;db1d50717577:46699 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db1d50717577:0, corePoolSize=3, maxPoolSize=3 2024-12-10T14:25:19,559 DEBUG [RS:0;db1d50717577:46699 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0, corePoolSize=3, maxPoolSize=3 2024-12-10T14:25:19,561 INFO [RS:0;db1d50717577:46699 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T14:25:19,561 INFO [RS:0;db1d50717577:46699 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T14:25:19,562 INFO [RS:0;db1d50717577:46699 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T14:25:19,562 INFO [RS:0;db1d50717577:46699 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T14:25:19,562 INFO [RS:0;db1d50717577:46699 {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,46699,1733840717757-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
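Each `executor.ExecutorService(95)` line above records a named pool with a corePoolSize/maxPoolSize pair. Purely as a JDK analogue (this is not HBase's own ExecutorService class), the same sizing shape looks like this:

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PoolSketch {
  public static void main(String[] args) {
    // corePoolSize=1, maxPoolSize=1, mirroring the RS_OPEN_REGION pool above.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        1, 1,
        60, TimeUnit.SECONDS,
        new LinkedBlockingQueue<>());
    pool.execute(() -> System.out.println("open region task"));
    pool.shutdown();
  }
}
```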
2024-12-10T14:25:19,582 INFO [RS:0;db1d50717577:46699 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T14:25:19,584 INFO [RS:0;db1d50717577:46699 {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,46699,1733840717757-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T14:25:19,603 INFO [RS:0;db1d50717577:46699 {}] regionserver.Replication(204): db1d50717577,46699,1733840717757 started 2024-12-10T14:25:19,604 INFO [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1767): Serving as db1d50717577,46699,1733840717757, RpcServer on db1d50717577/172.17.0.2:46699, sessionid=0x1019cc3ac5f0001 2024-12-10T14:25:19,604 DEBUG [RS:0;db1d50717577:46699 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T14:25:19,604 DEBUG [RS:0;db1d50717577:46699 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db1d50717577,46699,1733840717757 2024-12-10T14:25:19,604 DEBUG [RS:0;db1d50717577:46699 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db1d50717577,46699,1733840717757' 2024-12-10T14:25:19,604 DEBUG [RS:0;db1d50717577:46699 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T14:25:19,605 DEBUG [RS:0;db1d50717577:46699 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T14:25:19,606 DEBUG [RS:0;db1d50717577:46699 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T14:25:19,606 DEBUG [RS:0;db1d50717577:46699 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T14:25:19,606 DEBUG [RS:0;db1d50717577:46699 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db1d50717577,46699,1733840717757 2024-12-10T14:25:19,606 DEBUG [RS:0;db1d50717577:46699 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db1d50717577,46699,1733840717757' 2024-12-10T14:25:19,606 DEBUG [RS:0;db1d50717577:46699 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T14:25:19,607 DEBUG [RS:0;db1d50717577:46699 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T14:25:19,608 DEBUG [RS:0;db1d50717577:46699 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T14:25:19,608 INFO [RS:0;db1d50717577:46699 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T14:25:19,608 INFO [RS:0;db1d50717577:46699 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
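The flush-table-proc and online-snapshot members started above are the region-server side of coordinated table flushes and snapshots. A hedged client-side sketch of requesting such a flush through the standard Admin API is shown below; the table name "example" is a placeholder.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A table flush is one of the operations coordinated through the
      // procedure members started in the log above.
      admin.flush(TableName.valueOf("example"));
    }
  }
}
```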
2024-12-10T14:25:19,713 INFO [RS:0;db1d50717577:46699 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T14:25:19,717 INFO [RS:0;db1d50717577:46699 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db1d50717577%2C46699%2C1733840717757, suffix=, logDir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/WALs/db1d50717577,46699,1733840717757, archiveDir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/oldWALs, maxLogs=32 2024-12-10T14:25:19,733 DEBUG [RS:0;db1d50717577:46699 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/WALs/db1d50717577,46699,1733840717757/db1d50717577%2C46699%2C1733840717757.1733840719719, exclude list is [], retry=0 2024-12-10T14:25:19,738 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41075,DS-86872b83-c5b1-4604-9e37-35fb12cc34f2,DISK] 2024-12-10T14:25:19,742 INFO [RS:0;db1d50717577:46699 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/WALs/db1d50717577,46699,1733840717757/db1d50717577%2C46699%2C1733840717757.1733840719719 2024-12-10T14:25:19,742 DEBUG [RS:0;db1d50717577:46699 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42405:42405)] 2024-12-10T14:25:19,810 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-10T14:25:19,810 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da 2024-12-10T14:25:19,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741833_1009 (size=32) 2024-12-10T14:25:20,221 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:25:20,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T14:25:20,227 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T14:25:20,227 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:20,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T14:25:20,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T14:25:20,230 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T14:25:20,231 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:20,231 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T14:25:20,232 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T14:25:20,234 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T14:25:20,234 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:20,235 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T14:25:20,237 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740 2024-12-10T14:25:20,238 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740 2024-12-10T14:25:20,240 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
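The `FlushLargeStoresPolicy(65)` messages state the fallback rule directly: with no hbase.hregion.percolumnfamilyflush.size.lower.bound set, the bound becomes the region's memstore flush heap size divided by the number of column families. The calculation below reproduces the 32 M figure logged earlier for master:store (four families, assuming a 128 MB flush size); the meta region's 16 M value follows the same rule with its own flush size and family count.

```java
public class FlushLowerBound {
  public static void main(String[] args) {
    long memStoreFlushSize = 128L * 1024 * 1024; // assumed flush heap size
    int families = 4;                            // master:store has info, proc, rs, state
    long lowerBound = memStoreFlushSize / families;
    System.out.println(lowerBound);              // 33554432 (32 M), matching the log
  }
}
```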
2024-12-10T14:25:20,243 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-10T14:25:20,247 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:25:20,248 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58730431, jitterRate=-0.1248483806848526}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T14:25:20,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-10T14:25:20,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-10T14:25:20,250 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-10T14:25:20,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-10T14:25:20,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T14:25:20,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T14:25:20,252 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-10T14:25:20,252 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-10T14:25:20,254 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-10T14:25:20,254 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-10T14:25:20,260 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-10T14:25:20,268 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T14:25:20,270 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-10T14:25:20,422 DEBUG [db1d50717577:33823 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-10T14:25:20,427 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:25:20,431 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db1d50717577,46699,1733840717757, state=OPENING 2024-12-10T14:25:20,436 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-10T14:25:20,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:25:20,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:25:20,439 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T14:25:20,439 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T14:25:20,441 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:25:20,615 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:20,617 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T14:25:20,620 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39010, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T14:25:20,631 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-10T14:25:20,631 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T14:25:20,632 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-10T14:25:20,635 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db1d50717577%2C46699%2C1733840717757.meta, suffix=.meta, logDir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/WALs/db1d50717577,46699,1733840717757, archiveDir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/oldWALs, maxLogs=32 2024-12-10T14:25:20,652 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/WALs/db1d50717577,46699,1733840717757/db1d50717577%2C46699%2C1733840717757.meta.1733840720637.meta, exclude list is [], retry=0 2024-12-10T14:25:20,655 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41075,DS-86872b83-c5b1-4604-9e37-35fb12cc34f2,DISK] 2024-12-10T14:25:20,660 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/WALs/db1d50717577,46699,1733840717757/db1d50717577%2C46699%2C1733840717757.meta.1733840720637.meta 2024-12-10T14:25:20,660 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:42405:42405)] 2024-12-10T14:25:20,661 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:25:20,662 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-10T14:25:20,721 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-10T14:25:20,725 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-10T14:25:20,730 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-10T14:25:20,730 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:25:20,730 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-10T14:25:20,730 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-10T14:25:20,734 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T14:25:20,735 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T14:25:20,735 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:20,736 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T14:25:20,736 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T14:25:20,738 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T14:25:20,738 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:20,739 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T14:25:20,739 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T14:25:20,740 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T14:25:20,740 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:20,741 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T14:25:20,743 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740 2024-12-10T14:25:20,746 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740 2024-12-10T14:25:20,748 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T14:25:20,751 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-10T14:25:20,753 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61575706, jitterRate=-0.08245047926902771}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T14:25:20,754 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-10T14:25:20,761 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733840720609 2024-12-10T14:25:20,772 DEBUG [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-10T14:25:20,772 INFO [RS_OPEN_META-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-10T14:25:20,774 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:25:20,775 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db1d50717577,46699,1733840717757, state=OPEN 2024-12-10T14:25:20,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T14:25:20,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T14:25:20,782 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T14:25:20,782 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T14:25:20,786 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-10T14:25:20,786 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=db1d50717577,46699,1733840717757 in 342 msec 2024-12-10T14:25:20,792 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-10T14:25:20,792 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 527 msec 2024-12-10T14:25:20,797 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.4710 sec 2024-12-10T14:25:20,797 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733840720797, completionTime=-1 2024-12-10T14:25:20,797 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-10T14:25:20,798 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-10T14:25:20,835 DEBUG [hconnection-0x540ea891-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:20,838 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44206, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:20,849 INFO [master/db1d50717577:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-10T14:25:20,849 INFO [master/db1d50717577:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733840780849 2024-12-10T14:25:20,849 INFO [master/db1d50717577:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733840840849 2024-12-10T14:25:20,849 INFO [master/db1d50717577:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 51 msec 2024-12-10T14:25:20,871 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,33823,1733840717045-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T14:25:20,871 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,33823,1733840717045-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T14:25:20,872 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,33823,1733840717045-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T14:25:20,873 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db1d50717577:33823, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T14:25:20,873 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-10T14:25:20,879 DEBUG [master/db1d50717577:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-10T14:25:20,881 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-10T14:25:20,883 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T14:25:20,889 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-10T14:25:20,891 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:25:20,893 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:20,894 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:25:20,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741835_1011 (size=358) 2024-12-10T14:25:21,309 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d677ce41b1f947badc4a07f8de4e4b16, NAME => 'hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da 2024-12-10T14:25:21,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741836_1012 (size=42) 2024-12-10T14:25:21,720 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:25:21,720 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing d677ce41b1f947badc4a07f8de4e4b16, disabling compactions & flushes 2024-12-10T14:25:21,720 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16. 2024-12-10T14:25:21,720 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16. 2024-12-10T14:25:21,720 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16. 
after waiting 0 ms 2024-12-10T14:25:21,720 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16. 2024-12-10T14:25:21,720 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16. 2024-12-10T14:25:21,720 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for d677ce41b1f947badc4a07f8de4e4b16: 2024-12-10T14:25:21,723 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:25:21,729 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733840721724"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733840721724"}]},"ts":"1733840721724"} 2024-12-10T14:25:21,752 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T14:25:21,754 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:25:21,757 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840721754"}]},"ts":"1733840721754"} 2024-12-10T14:25:21,761 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-10T14:25:21,768 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=d677ce41b1f947badc4a07f8de4e4b16, ASSIGN}] 2024-12-10T14:25:21,770 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=d677ce41b1f947badc4a07f8de4e4b16, ASSIGN 2024-12-10T14:25:21,772 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=d677ce41b1f947badc4a07f8de4e4b16, ASSIGN; state=OFFLINE, location=db1d50717577,46699,1733840717757; forceNewPlan=false, retain=false 2024-12-10T14:25:21,923 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=d677ce41b1f947badc4a07f8de4e4b16, regionState=OPENING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:25:21,926 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure d677ce41b1f947badc4a07f8de4e4b16, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:25:22,080 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:22,086 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16. 2024-12-10T14:25:22,086 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => d677ce41b1f947badc4a07f8de4e4b16, NAME => 'hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16.', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:25:22,087 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace d677ce41b1f947badc4a07f8de4e4b16 2024-12-10T14:25:22,087 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:25:22,087 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for d677ce41b1f947badc4a07f8de4e4b16 2024-12-10T14:25:22,087 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for d677ce41b1f947badc4a07f8de4e4b16 2024-12-10T14:25:22,089 INFO [StoreOpener-d677ce41b1f947badc4a07f8de4e4b16-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d677ce41b1f947badc4a07f8de4e4b16 2024-12-10T14:25:22,092 INFO [StoreOpener-d677ce41b1f947badc4a07f8de4e4b16-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d677ce41b1f947badc4a07f8de4e4b16 columnFamilyName info 2024-12-10T14:25:22,092 DEBUG [StoreOpener-d677ce41b1f947badc4a07f8de4e4b16-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:22,093 INFO [StoreOpener-d677ce41b1f947badc4a07f8de4e4b16-1 {}] regionserver.HStore(327): Store=d677ce41b1f947badc4a07f8de4e4b16/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:25:22,094 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/namespace/d677ce41b1f947badc4a07f8de4e4b16 2024-12-10T14:25:22,094 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/namespace/d677ce41b1f947badc4a07f8de4e4b16 2024-12-10T14:25:22,098 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for d677ce41b1f947badc4a07f8de4e4b16 2024-12-10T14:25:22,101 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/namespace/d677ce41b1f947badc4a07f8de4e4b16/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:25:22,102 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened d677ce41b1f947badc4a07f8de4e4b16; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75062309, jitterRate=0.11851556599140167}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T14:25:22,103 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for d677ce41b1f947badc4a07f8de4e4b16: 2024-12-10T14:25:22,105 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16., pid=6, masterSystemTime=1733840722080 2024-12-10T14:25:22,108 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16. 2024-12-10T14:25:22,108 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16. 
2024-12-10T14:25:22,109 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=d677ce41b1f947badc4a07f8de4e4b16, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:25:22,116 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-10T14:25:22,116 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure d677ce41b1f947badc4a07f8de4e4b16, server=db1d50717577,46699,1733840717757 in 187 msec 2024-12-10T14:25:22,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-10T14:25:22,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=d677ce41b1f947badc4a07f8de4e4b16, ASSIGN in 348 msec 2024-12-10T14:25:22,121 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:25:22,122 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840722121"}]},"ts":"1733840722121"} 2024-12-10T14:25:22,124 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-10T14:25:22,128 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:25:22,130 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2450 sec 2024-12-10T14:25:22,193 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-10T14:25:22,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-10T14:25:22,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:25:22,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:25:22,223 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-10T14:25:22,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-10T14:25:22,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 26 msec 2024-12-10T14:25:22,257 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-10T14:25:22,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-10T14:25:22,273 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 15 msec 2024-12-10T14:25:22,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-10T14:25:22,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-10T14:25:22,287 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.461sec 2024-12-10T14:25:22,289 INFO [master/db1d50717577:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-10T14:25:22,290 INFO [master/db1d50717577:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-10T14:25:22,291 INFO [master/db1d50717577:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-10T14:25:22,291 INFO [master/db1d50717577:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-10T14:25:22,291 INFO [master/db1d50717577:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-10T14:25:22,292 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,33823,1733840717045-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T14:25:22,293 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,33823,1733840717045-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-10T14:25:22,299 DEBUG [master/db1d50717577:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-10T14:25:22,300 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-10T14:25:22,300 INFO [master/db1d50717577:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db1d50717577,33823,1733840717045-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
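The two CreateNamespaceProcedure entries above ('default' and 'hbase') are run internally by the master during initialization; the same operation is exposed to clients through the Admin API. The following is only an illustrative sketch of that client-side path, assuming a reachable cluster; the quorum address and the namespace name "example_ns" are assumptions for the example, not values from this run.

// Illustrative sketch: creating a namespace through the HBase client Admin API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateNamespaceExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // assumption: adjust to your cluster
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Client-side equivalent of a CreateNamespaceProcedure, for a user namespace.
      admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
    }
  }
}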
2024-12-10T14:25:22,308 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x76523d14 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@46873e4f 2024-12-10T14:25:22,309 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-10T14:25:22,315 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76ba07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:22,318 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-10T14:25:22,318 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-10T14:25:22,327 DEBUG [hconnection-0x7edf53b1-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:22,336 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44220, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:22,345 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=db1d50717577,33823,1733840717045 2024-12-10T14:25:22,357 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=55, ProcessCount=11, AvailableMemoryMB=3069 2024-12-10T14:25:22,367 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T14:25:22,370 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58658, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T14:25:22,394 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
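The TableDescriptorChecker warning above reports a 131072-byte (128 KB) flush size, which the test harness uses to force frequent flushes. As a rough sketch of where such a value can come from, it may be set cluster-wide via "hbase.hregion.memstore.flush.size" or per table via MEMSTORE_FLUSHSIZE on the descriptor; the 128 KB figure is taken from the warning, while the class name, the column family "A", and the standalone main method are assumptions for illustration.

// Illustrative sketch: two places a small memstore flush size can be configured.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallFlushSizeExample {
  public static void main(String[] args) {
    // Cluster-wide default: 128 KB instead of the usual 128 MB.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

    // Or per table, via MEMSTORE_FLUSHSIZE on the table descriptor.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
        .setMemStoreFlushSize(128 * 1024L)
        .build();
    System.out.println(td);
  }
}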
2024-12-10T14:25:22,398 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:25:22,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T14:25:22,402 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:25:22,403 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:22,405 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:25:22,406 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-10T14:25:22,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T14:25:22,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741837_1013 (size=963) 2024-12-10T14:25:22,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T14:25:22,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T14:25:22,830 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da 2024-12-10T14:25:22,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741838_1014 (size=53) 2024-12-10T14:25:23,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T14:25:23,240 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:25:23,240 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 3cb281b62d072b2e7312c326c99dffff, disabling compactions & flushes 2024-12-10T14:25:23,240 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:23,240 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:23,240 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. after waiting 0 ms 2024-12-10T14:25:23,240 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:23,240 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
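The CREATE logged above defines three column families A/B/C with VERSIONS => '1' and the table attribute hbase.hregion.compacting.memstore.type => 'ADAPTIVE'. A client-side descriptor producing a comparable table could look roughly like the sketch below; the table name, family names, version count, and the compacting-memstore attribute are taken from the log, while the connection setup and everything else are assumptions for the example.

// Illustrative sketch: building a table descriptor equivalent to the logged CREATE.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuaranteesTable {
  public static void main(String[] args) throws Exception {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level attribute shown in the log's TABLE_ATTRIBUTES metadata.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] {"A", "B", "C"}) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
              .build());
    }
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(builder.build());
    }
  }
}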
2024-12-10T14:25:23,240 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:23,242 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:25:23,243 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733840723243"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733840723243"}]},"ts":"1733840723243"} 2024-12-10T14:25:23,246 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T14:25:23,247 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:25:23,248 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840723248"}]},"ts":"1733840723248"} 2024-12-10T14:25:23,250 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T14:25:23,256 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3cb281b62d072b2e7312c326c99dffff, ASSIGN}] 2024-12-10T14:25:23,257 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3cb281b62d072b2e7312c326c99dffff, ASSIGN 2024-12-10T14:25:23,259 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3cb281b62d072b2e7312c326c99dffff, ASSIGN; state=OFFLINE, location=db1d50717577,46699,1733840717757; forceNewPlan=false, retain=false 2024-12-10T14:25:23,409 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=3cb281b62d072b2e7312c326c99dffff, regionState=OPENING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:25:23,413 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:25:23,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T14:25:23,566 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:23,572 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
2024-12-10T14:25:23,572 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:25:23,572 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:23,572 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:25:23,573 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:23,573 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:23,575 INFO [StoreOpener-3cb281b62d072b2e7312c326c99dffff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:23,578 INFO [StoreOpener-3cb281b62d072b2e7312c326c99dffff-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:25:23,578 INFO [StoreOpener-3cb281b62d072b2e7312c326c99dffff-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3cb281b62d072b2e7312c326c99dffff columnFamilyName A 2024-12-10T14:25:23,578 DEBUG [StoreOpener-3cb281b62d072b2e7312c326c99dffff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:23,579 INFO [StoreOpener-3cb281b62d072b2e7312c326c99dffff-1 {}] regionserver.HStore(327): Store=3cb281b62d072b2e7312c326c99dffff/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:25:23,579 INFO [StoreOpener-3cb281b62d072b2e7312c326c99dffff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:23,581 INFO [StoreOpener-3cb281b62d072b2e7312c326c99dffff-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:25:23,581 INFO [StoreOpener-3cb281b62d072b2e7312c326c99dffff-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3cb281b62d072b2e7312c326c99dffff columnFamilyName B 2024-12-10T14:25:23,581 DEBUG [StoreOpener-3cb281b62d072b2e7312c326c99dffff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:23,582 INFO [StoreOpener-3cb281b62d072b2e7312c326c99dffff-1 {}] regionserver.HStore(327): Store=3cb281b62d072b2e7312c326c99dffff/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:25:23,582 INFO [StoreOpener-3cb281b62d072b2e7312c326c99dffff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:23,584 INFO [StoreOpener-3cb281b62d072b2e7312c326c99dffff-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:25:23,584 INFO [StoreOpener-3cb281b62d072b2e7312c326c99dffff-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3cb281b62d072b2e7312c326c99dffff columnFamilyName C 2024-12-10T14:25:23,584 DEBUG [StoreOpener-3cb281b62d072b2e7312c326c99dffff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:23,585 INFO [StoreOpener-3cb281b62d072b2e7312c326c99dffff-1 {}] regionserver.HStore(327): Store=3cb281b62d072b2e7312c326c99dffff/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:25:23,586 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:23,587 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:23,587 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:23,590 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T14:25:23,592 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:23,595 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:25:23,596 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 3cb281b62d072b2e7312c326c99dffff; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61002394, jitterRate=-0.0909934937953949}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T14:25:23,597 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:23,598 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., pid=11, masterSystemTime=1733840723566 2024-12-10T14:25:23,601 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:23,601 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
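With the region now open on CompactingMemStore-backed stores, the entries that follow show client connections writing to the table and an explicit "flush TestAcidGuarantees" request. The sketch below is a rough client-side analogue of that traffic, assuming a reachable cluster; the row key, qualifier, and value are assumptions, not data from this run.

// Illustrative sketch: a put into the table followed by an explicit flush request.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushExample {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(name);
         Admin admin = conn.getAdmin()) {
      Put put = new Put(Bytes.toBytes("test_row_0")); // assumption: example row key
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col0"), Bytes.toBytes("value"));
      table.put(put);
      // Ask the master to flush the table's memstores to store files.
      admin.flush(name);
    }
  }
}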
2024-12-10T14:25:23,602 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=3cb281b62d072b2e7312c326c99dffff, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:25:23,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-10T14:25:23,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 in 192 msec 2024-12-10T14:25:23,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-10T14:25:23,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3cb281b62d072b2e7312c326c99dffff, ASSIGN in 353 msec 2024-12-10T14:25:23,612 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:25:23,613 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840723612"}]},"ts":"1733840723612"} 2024-12-10T14:25:23,615 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T14:25:23,619 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:25:23,621 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2200 sec 2024-12-10T14:25:24,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T14:25:24,526 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-10T14:25:24,531 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6fcb5f29 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7fdf5682 2024-12-10T14:25:24,535 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f6e36fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:24,537 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:24,541 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44232, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:24,544 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T14:25:24,546 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58666, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T14:25:24,553 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f2091cc to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79d38d10 2024-12-10T14:25:24,558 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f343a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:24,559 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09bd0964 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6c63ae4e 2024-12-10T14:25:24,562 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1324ee83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:24,563 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18cb251d to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@736f1673 2024-12-10T14:25:24,566 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@478bae6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:24,567 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x45b55c24 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ee2166f 2024-12-10T14:25:24,570 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48068a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:24,571 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e52b42a to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f34ff67 2024-12-10T14:25:24,575 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38766d64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:24,576 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09ed28bb to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4b5cad1a 2024-12-10T14:25:24,580 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@295cb1ac, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:24,581 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12a1285d to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c3b736e 2024-12-10T14:25:24,584 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70267494, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:24,586 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x353bc462 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@767a8485 2024-12-10T14:25:24,588 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d2a8e08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:24,589 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x47fe2fa7 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6502d571 2024-12-10T14:25:24,594 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c915d17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:24,599 DEBUG [hconnection-0x23bc3a57-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:24,605 DEBUG [hconnection-0x5e621c7f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:24,605 DEBUG [hconnection-0x106fdee5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:24,605 DEBUG [hconnection-0x7c3cfe41-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:24,605 DEBUG [hconnection-0x4a22ee9a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:24,606 DEBUG [hconnection-0x134cb240-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:24,606 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:25:24,606 DEBUG [hconnection-0x293c9f96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:24,607 DEBUG [hconnection-0x624e92fc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:24,607 INFO 
[RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44240, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:24,607 DEBUG [hconnection-0x17b3816e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:24,609 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44264, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:24,609 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44266, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:24,610 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44248, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:24,611 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44280, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:24,613 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44294, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-10T14:25:24,616 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:25:24,617 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44298, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T14:25:24,619 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:25:24,619 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44304, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:24,620 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:25:24,635 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44314, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:24,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:24,680 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T14:25:24,688 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:24,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:24,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:24,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:24,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:24,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:24,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T14:25:24,786 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/8ee59d6064f54e828cd79a60f6d91bad is 50, key is test_row_0/A:col10/1733840724667/Put/seqid=0 2024-12-10T14:25:24,790 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:24,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T14:25:24,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:24,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:24,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:24,802 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:24,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:24,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:24,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:24,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:24,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840784807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:24,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840784807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:24,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741839_1015 (size=12001) 2024-12-10T14:25:24,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:24,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840784809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:24,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:24,823 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/8ee59d6064f54e828cd79a60f6d91bad 2024-12-10T14:25:24,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840784813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:24,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:24,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840784817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:24,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/e8ba0b70cd4a4850911a78788d98007b is 50, key is test_row_0/B:col10/1733840724667/Put/seqid=0 2024-12-10T14:25:24,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T14:25:24,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741840_1016 (size=12001) 2024-12-10T14:25:24,928 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/e8ba0b70cd4a4850911a78788d98007b 2024-12-10T14:25:24,969 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:24,970 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T14:25:24,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
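The rejected Mutate calls above are the ACID-test writer threads putting the same value into families A, B and C of row test_row_0 (column col10) while the region's memstore is over its blocking limit; the client retries these internally, which is why the same connections keep reappearing with new callIds and deadlines. A minimal sketch of that kind of write against the public client API (the connection setup and the value written are illustrative, not taken from the test source):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AcidWriterSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // One row, the same value written to all three families so a concurrent
      // reader can verify the row is always seen in a consistent state.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      byte[] value = Bytes.toBytes("value-0");                    // illustrative value
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
      }
      table.put(put);  // retried by the client if the server answers RegionTooBusyException
    }
  }
}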
2024-12-10T14:25:24,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:24,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:24,973 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:24,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
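The pid=12 FlushTableProcedure and its pid=13 FlushRegionProcedure child come from the client flush request logged at the master ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"); the region server keeps answering pid=13 with "Unable to complete flush ... as already flushing" because MemStoreFlusher.0 is still flushing the same region, so the master re-dispatches the sub-procedure until that flush completes. A minimal sketch of issuing such a flush through the public Admin API (the setup is illustrative, not the test's own code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml / ZK quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; in this log that request
      // is executed as a FlushTableProcedure with per-region sub-procedures (pid=12/13).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}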
2024-12-10T14:25:24,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:24,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:24,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840784962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:24,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:24,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840784964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:24,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:24,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840784964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:24,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:24,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:24,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/36f2a51773314d84a399522e2f680fd2 is 50, key is test_row_0/C:col10/1733840724667/Put/seqid=0 2024-12-10T14:25:24,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840784962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:24,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840784972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:24,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741841_1017 (size=12001) 2024-12-10T14:25:25,128 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:25,129 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T14:25:25,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:25,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:25,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
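The repeated RegionTooBusyException: Over memstore limit=512.0 K entries are HRegion.checkResources blocking new writes because the region's memstore has grown past its blocking threshold, i.e. the per-region flush size times the block multiplier; a 512 K limit is far below the defaults, which suggests the test deliberately shrinks the flush size so that flushing and write-blocking happen quickly. A sketch of the two settings involved (the concrete values below are assumptions chosen only to reproduce a 512 K limit, not values read from the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values: a 128 K flush size with the default multiplier of 4 gives the
    // 512 K blocking limit reported in the RegionTooBusyException messages above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes block once a region's memstore exceeds " + blockingLimit + " bytes");
  }
}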
2024-12-10T14:25:25,132 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:25,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:25,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:25,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840785181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840785185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840785186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840785186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840785185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T14:25:25,287 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:25,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T14:25:25,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:25,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:25,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:25,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:25,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:25,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:25,398 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/36f2a51773314d84a399522e2f680fd2 2024-12-10T14:25:25,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/8ee59d6064f54e828cd79a60f6d91bad as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/8ee59d6064f54e828cd79a60f6d91bad 2024-12-10T14:25:25,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/8ee59d6064f54e828cd79a60f6d91bad, entries=150, sequenceid=12, filesize=11.7 K 2024-12-10T14:25:25,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/e8ba0b70cd4a4850911a78788d98007b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/e8ba0b70cd4a4850911a78788d98007b 2024-12-10T14:25:25,444 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:25,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T14:25:25,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:25,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:25,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:25,446 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:25,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:25,448 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/e8ba0b70cd4a4850911a78788d98007b, entries=150, sequenceid=12, filesize=11.7 K 2024-12-10T14:25:25,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:25,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/36f2a51773314d84a399522e2f680fd2 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/36f2a51773314d84a399522e2f680fd2 2024-12-10T14:25:25,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/36f2a51773314d84a399522e2f680fd2, entries=150, sequenceid=12, filesize=11.7 K 2024-12-10T14:25:25,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 3cb281b62d072b2e7312c326c99dffff in 788ms, sequenceid=12, compaction requested=false 2024-12-10T14:25:25,471 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-10T14:25:25,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:25,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:25,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T14:25:25,502 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:25,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:25,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:25,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:25,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:25,503 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:25,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/df8b7b5229804b6b9be472dfc5cfbcb9 is 50, key is test_row_0/A:col10/1733840725499/Put/seqid=0 2024-12-10T14:25:25,524 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-10T14:25:25,525 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-10T14:25:25,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840785505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840785507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840785510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840785529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840785529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741842_1018 (size=16681) 2024-12-10T14:25:25,549 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/df8b7b5229804b6b9be472dfc5cfbcb9 2024-12-10T14:25:25,583 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/b34d8a68242b4da4b58fcfb81f853cd7 is 50, key is test_row_0/B:col10/1733840725499/Put/seqid=0 2024-12-10T14:25:25,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741843_1019 (size=12001) 2024-12-10T14:25:25,600 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/b34d8a68242b4da4b58fcfb81f853cd7 2024-12-10T14:25:25,601 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:25,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T14:25:25,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:25,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:25,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
2024-12-10T14:25:25,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:25,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:25,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:25,627 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/f88802fa4f9647cc9b847d5d117165ec is 50, key is test_row_0/C:col10/1733840725499/Put/seqid=0 2024-12-10T14:25:25,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840785633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840785640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840785639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840785643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840785644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741844_1020 (size=12001) 2024-12-10T14:25:25,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/f88802fa4f9647cc9b847d5d117165ec 2024-12-10T14:25:25,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/df8b7b5229804b6b9be472dfc5cfbcb9 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/df8b7b5229804b6b9be472dfc5cfbcb9 2024-12-10T14:25:25,707 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/df8b7b5229804b6b9be472dfc5cfbcb9, entries=250, sequenceid=41, filesize=16.3 K 2024-12-10T14:25:25,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/b34d8a68242b4da4b58fcfb81f853cd7 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b34d8a68242b4da4b58fcfb81f853cd7 2024-12-10T14:25:25,722 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b34d8a68242b4da4b58fcfb81f853cd7, entries=150, sequenceid=41, filesize=11.7 K 2024-12-10T14:25:25,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T14:25:25,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/f88802fa4f9647cc9b847d5d117165ec as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/f88802fa4f9647cc9b847d5d117165ec 2024-12-10T14:25:25,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/f88802fa4f9647cc9b847d5d117165ec, entries=150, sequenceid=41, filesize=11.7 K 2024-12-10T14:25:25,747 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 3cb281b62d072b2e7312c326c99dffff in 250ms, sequenceid=41, compaction requested=false 2024-12-10T14:25:25,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:25,760 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:25,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T14:25:25,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
2024-12-10T14:25:25,761 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-10T14:25:25,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:25,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:25,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:25,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:25,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:25,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:25,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/83a91d9be4ed41ee928f346fd40ec2cc is 50, key is test_row_0/A:col10/1733840725504/Put/seqid=0 2024-12-10T14:25:25,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741845_1021 (size=12001) 2024-12-10T14:25:25,797 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/83a91d9be4ed41ee928f346fd40ec2cc 2024-12-10T14:25:25,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/4963e235156442968bc748378b051b57 is 50, key is test_row_0/B:col10/1733840725504/Put/seqid=0 2024-12-10T14:25:25,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741846_1022 (size=12001) 2024-12-10T14:25:25,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:25,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
as already flushing 2024-12-10T14:25:25,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840785920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840785932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840785935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840785939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:25,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:25,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840785940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840786042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840786043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840786057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840786057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840786058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,241 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/4963e235156442968bc748378b051b57 2024-12-10T14:25:26,242 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T14:25:26,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840786253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840786256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840786265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840786265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/6da94d21da5247bb97b3d393c614dcc9 is 50, key is test_row_0/C:col10/1733840725504/Put/seqid=0 2024-12-10T14:25:26,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840786265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741847_1023 (size=12001) 2024-12-10T14:25:26,286 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/6da94d21da5247bb97b3d393c614dcc9 2024-12-10T14:25:26,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/83a91d9be4ed41ee928f346fd40ec2cc as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/83a91d9be4ed41ee928f346fd40ec2cc 2024-12-10T14:25:26,318 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/83a91d9be4ed41ee928f346fd40ec2cc, entries=150, sequenceid=48, filesize=11.7 K 2024-12-10T14:25:26,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/4963e235156442968bc748378b051b57 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/4963e235156442968bc748378b051b57 2024-12-10T14:25:26,334 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/4963e235156442968bc748378b051b57, entries=150, sequenceid=48, filesize=11.7 K 2024-12-10T14:25:26,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/6da94d21da5247bb97b3d393c614dcc9 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/6da94d21da5247bb97b3d393c614dcc9 2024-12-10T14:25:26,355 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/6da94d21da5247bb97b3d393c614dcc9, entries=150, sequenceid=48, filesize=11.7 K 2024-12-10T14:25:26,358 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for 3cb281b62d072b2e7312c326c99dffff in 597ms, sequenceid=48, compaction requested=true 2024-12-10T14:25:26,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:26,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:26,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-10T14:25:26,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-10T14:25:26,367 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-10T14:25:26,367 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7420 sec 2024-12-10T14:25:26,370 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.7610 sec 2024-12-10T14:25:26,565 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-10T14:25:26,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:26,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:26,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:26,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:26,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:26,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping 
pipeline suffix; before=1, new segment=null 2024-12-10T14:25:26,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:26,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840786571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840786571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840786571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,578 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/1b08846939ae4fe18805bc803b564f9e is 50, key is test_row_0/A:col10/1733840726562/Put/seqid=0 2024-12-10T14:25:26,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840786577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840786577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741848_1024 (size=14341) 2024-12-10T14:25:26,591 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/1b08846939ae4fe18805bc803b564f9e 2024-12-10T14:25:26,612 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/eb970e9a17e24911acd6d9772d8e0025 is 50, key is test_row_0/B:col10/1733840726562/Put/seqid=0 2024-12-10T14:25:26,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741849_1025 (size=12001) 2024-12-10T14:25:26,685 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840786685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840786685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T14:25:26,728 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-10T14:25:26,731 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:25:26,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-10T14:25:26,736 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:25:26,738 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:25:26,738 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:25:26,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T14:25:26,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T14:25:26,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840786888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:26,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840786890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:26,893 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:26,894 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-10T14:25:26,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:26,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:26,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:26,894 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:26,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:26,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:27,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/eb970e9a17e24911acd6d9772d8e0025 2024-12-10T14:25:27,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T14:25:27,048 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:27,049 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-10T14:25:27,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:27,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:27,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:27,050 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:27,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:27,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:27,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/4afcb3589c704fc08f8808fa91241566 is 50, key is test_row_0/C:col10/1733840726562/Put/seqid=0 2024-12-10T14:25:27,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741850_1026 (size=12001) 2024-12-10T14:25:27,077 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/4afcb3589c704fc08f8808fa91241566 2024-12-10T14:25:27,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:27,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840787078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:27,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:27,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840787081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:27,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:27,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840787082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:27,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/1b08846939ae4fe18805bc803b564f9e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/1b08846939ae4fe18805bc803b564f9e 2024-12-10T14:25:27,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/1b08846939ae4fe18805bc803b564f9e, entries=200, sequenceid=78, filesize=14.0 K 2024-12-10T14:25:27,103 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/eb970e9a17e24911acd6d9772d8e0025 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/eb970e9a17e24911acd6d9772d8e0025 2024-12-10T14:25:27,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/eb970e9a17e24911acd6d9772d8e0025, entries=150, sequenceid=78, filesize=11.7 K 2024-12-10T14:25:27,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/4afcb3589c704fc08f8808fa91241566 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/4afcb3589c704fc08f8808fa91241566 2024-12-10T14:25:27,137 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/4afcb3589c704fc08f8808fa91241566, entries=150, sequenceid=78, filesize=11.7 K 2024-12-10T14:25:27,139 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 
KB/20610 for 3cb281b62d072b2e7312c326c99dffff in 573ms, sequenceid=78, compaction requested=true 2024-12-10T14:25:27,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:27,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:25:27,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:27,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:25:27,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:27,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:25:27,145 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:25:27,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:27,145 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:25:27,150 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:25:27,152 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/B is initiating minor compaction (all files) 2024-12-10T14:25:27,152 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/B in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
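
The flush that finishes above writes each store's new file under the region's .tmp directory first ("Flushed memstore ... to=.../.tmp/B/...") and only afterwards commits it into the store directory ("Committing .../.tmp/A/... as .../A/..."), so a reader never observes a partially written file. A minimal local-filesystem sketch of that write-to-temp-then-rename pattern using java.nio (hypothetical class and method names; the real code moves HFiles on HDFS):

    import java.io.IOException;
    import java.nio.file.*;

    public class TmpThenCommit {
        // Write bytes to a temporary file in tmpDir, then move it into storeDir in one step.
        static Path writeAndCommit(Path tmpDir, Path storeDir, String fileName, byte[] data)
                throws IOException {
            Files.createDirectories(tmpDir);
            Files.createDirectories(storeDir);
            Path tmp = tmpDir.resolve(fileName);
            Files.write(tmp, data);                        // flush result lands in .tmp first
            Path fin = storeDir.resolve(fileName);
            // Commit: the file becomes visible under the store directory only when complete.
            return Files.move(tmp, fin, StandardCopyOption.ATOMIC_MOVE);
        }

        public static void main(String[] args) throws IOException {
            Path base = Files.createTempDirectory("store-demo");
            Path committed = writeAndCommit(base.resolve(".tmp"), base.resolve("A"),
                    "1b08846939ae4fe18805bc803b564f9e", new byte[]{1, 2, 3});
            System.out.println("committed " + committed);
        }
    }

The move is the commit point: until it happens the data exists only under .tmp, and after it the file appears fully formed in the store, which is what the "Added ... entries=200, sequenceid=78" lines record.
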
2024-12-10T14:25:27,152 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/e8ba0b70cd4a4850911a78788d98007b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b34d8a68242b4da4b58fcfb81f853cd7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/4963e235156442968bc748378b051b57, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/eb970e9a17e24911acd6d9772d8e0025] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=46.9 K 2024-12-10T14:25:27,153 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55024 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:25:27,154 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/A is initiating minor compaction (all files) 2024-12-10T14:25:27,154 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/A in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:27,154 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting e8ba0b70cd4a4850911a78788d98007b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733840724665 2024-12-10T14:25:27,154 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/8ee59d6064f54e828cd79a60f6d91bad, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/df8b7b5229804b6b9be472dfc5cfbcb9, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/83a91d9be4ed41ee928f346fd40ec2cc, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/1b08846939ae4fe18805bc803b564f9e] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=53.7 K 2024-12-10T14:25:27,155 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting b34d8a68242b4da4b58fcfb81f853cd7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733840724803 2024-12-10T14:25:27,156 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ee59d6064f54e828cd79a60f6d91bad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, 
earliestPutTs=1733840724665 2024-12-10T14:25:27,156 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 4963e235156442968bc748378b051b57, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1733840725504 2024-12-10T14:25:27,156 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting df8b7b5229804b6b9be472dfc5cfbcb9, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733840724803 2024-12-10T14:25:27,157 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting eb970e9a17e24911acd6d9772d8e0025, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733840725927 2024-12-10T14:25:27,157 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83a91d9be4ed41ee928f346fd40ec2cc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1733840725504 2024-12-10T14:25:27,159 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b08846939ae4fe18805bc803b564f9e, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733840725898 2024-12-10T14:25:27,207 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:27,208 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-10T14:25:27,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:27,208 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-10T14:25:27,208 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#B#compaction#13 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:27,209 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#A#compaction#12 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:27,210 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/0937a42d2a834c75a571f23066425f78 is 50, key is test_row_0/B:col10/1733840726562/Put/seqid=0 2024-12-10T14:25:27,210 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/53aae04281384014b6729d4f091e9e65 is 50, key is test_row_0/A:col10/1733840726562/Put/seqid=0 2024-12-10T14:25:27,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:27,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:27,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:27,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:27,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:27,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:27,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:27,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
as already flushing 2024-12-10T14:25:27,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/4fdea548b8464ccdaf71e315d9782a58 is 50, key is test_row_0/A:col10/1733840726572/Put/seqid=0 2024-12-10T14:25:27,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741851_1027 (size=12139) 2024-12-10T14:25:27,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741852_1028 (size=12139) 2024-12-10T14:25:27,292 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/53aae04281384014b6729d4f091e9e65 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/53aae04281384014b6729d4f091e9e65 2024-12-10T14:25:27,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741853_1029 (size=14337) 2024-12-10T14:25:27,297 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/4fdea548b8464ccdaf71e315d9782a58 2024-12-10T14:25:27,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/dd060bf6a1b04479847bd0cf05b829e4 is 50, key is test_row_0/B:col10/1733840726572/Put/seqid=0 2024-12-10T14:25:27,334 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/A of 3cb281b62d072b2e7312c326c99dffff into 53aae04281384014b6729d4f091e9e65(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
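
The "Exploring compaction algorithm has selected 4 files of size 48004 ... after considering 3 permutations with 3 in ratio" lines above reflect a selection policy that scans contiguous runs of store files and only accepts runs whose members are reasonably close in size, so no single rewrite is dominated by one oversized file. A deliberately simplified sketch of such a ratio test (hypothetical names and thresholds, not HBase's actual ExploringCompactionPolicy):

    import java.util.ArrayList;
    import java.util.List;

    public class RatioSelection {
        // Pick the largest contiguous run of file sizes in which every file is at most
        // `ratio` times the combined size of the other files in the run.
        static List<Long> select(List<Long> sizes, double ratio, int minFiles, int maxFiles) {
            List<Long> best = new ArrayList<>();
            for (int start = 0; start < sizes.size(); start++) {
                for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
                    List<Long> run = sizes.subList(start, end);
                    long total = run.stream().mapToLong(Long::longValue).sum();
                    boolean inRatio = run.stream().allMatch(s -> s <= ratio * (total - s));
                    if (inRatio && run.size() > best.size()) {
                        best = new ArrayList<>(run);
                    }
                }
            }
            return best;
        }

        public static void main(String[] args) {
            // Sizes taken loosely from the ~12 KB flush outputs in this log.
            List<Long> sizes = List.of(12001L, 12001L, 12001L, 12001L);
            System.out.println(select(sizes, 1.2, 2, 10));   // the whole run of four qualifies
        }
    }

With four roughly equal flush outputs, every file is well under 1.2x the size of the other three, so all of them are selected, matching the "4 eligible" choice in the log.
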
2024-12-10T14:25:27,334 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:27,334 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/A, priority=12, startTime=1733840727140; duration=0sec 2024-12-10T14:25:27,335 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:27,335 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:A 2024-12-10T14:25:27,335 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:25:27,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T14:25:27,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:27,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840787336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:27,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:27,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840787341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:27,346 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:25:27,346 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/C is initiating minor compaction (all files) 2024-12-10T14:25:27,346 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/C in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
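
Each "RegionTooBusyException: Over memstore limit=512.0 K" warning in this log is the write path refusing a put because the region's in-memory store has grown past its blocking threshold (set very low in this test); the server requests a flush and pushes the retry back onto the client. A minimal sketch of that guard with hypothetical names (a plain IOException stands in for HBase's RegionTooBusyException):

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicLong;

    class SketchMemStoreGuard {
        private final long blockingLimitBytes;            // e.g. 512 KB in the test config above
        private final AtomicLong memStoreSize = new AtomicLong();

        SketchMemStoreGuard(long blockingLimitBytes) { this.blockingLimitBytes = blockingLimitBytes; }

        // Reject the write while the memstore is over its blocking limit.
        void checkResources(String regionName) throws IOException {
            long size = memStoreSize.get();
            if (size > blockingLimitBytes) {
                requestFlush(regionName);                  // ask the flusher to drain the memstore
                throw new IOException("Over memstore limit=" + blockingLimitBytes
                    + " bytes, regionName=" + regionName + ", current=" + size);
            }
        }

        void add(long bytes)     { memStoreSize.addAndGet(bytes); }
        void flushed(long bytes) { memStoreSize.addAndGet(-bytes); }

        private void requestFlush(String regionName) {
            // In the real server this queues the region on the MemStoreFlusher.
        }
    }

Writes call add(...) as they land, the flusher calls flushed(...) when a flush completes, and the checkResources(...) call is what turns a temporarily oversized memstore into the "Region is too busy" responses seen throughout this section.
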
2024-12-10T14:25:27,347 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/36f2a51773314d84a399522e2f680fd2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/f88802fa4f9647cc9b847d5d117165ec, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/6da94d21da5247bb97b3d393c614dcc9, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/4afcb3589c704fc08f8808fa91241566] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=46.9 K 2024-12-10T14:25:27,348 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36f2a51773314d84a399522e2f680fd2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733840724665 2024-12-10T14:25:27,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741854_1030 (size=9657) 2024-12-10T14:25:27,350 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting f88802fa4f9647cc9b847d5d117165ec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733840724803 2024-12-10T14:25:27,350 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/dd060bf6a1b04479847bd0cf05b829e4 2024-12-10T14:25:27,351 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6da94d21da5247bb97b3d393c614dcc9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1733840725504 2024-12-10T14:25:27,352 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4afcb3589c704fc08f8808fa91241566, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733840725927 2024-12-10T14:25:27,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/75d45f2ca3524098b9941df42b62eed7 is 50, key is test_row_0/C:col10/1733840726572/Put/seqid=0 2024-12-10T14:25:27,391 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#C#compaction#17 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:27,392 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/822b8114acb5401bbcff8c9b49648d89 is 50, key is test_row_0/C:col10/1733840726562/Put/seqid=0 2024-12-10T14:25:27,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741855_1031 (size=9657) 2024-12-10T14:25:27,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741856_1032 (size=12139) 2024-12-10T14:25:27,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:27,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840787447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:27,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:27,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840787447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:27,526 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-10T14:25:27,526 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-10T14:25:27,529 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-10T14:25:27,529 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-10T14:25:27,530 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T14:25:27,530 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-10T14:25:27,531 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-10T14:25:27,531 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-10T14:25:27,532 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-10T14:25:27,532 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-10T14:25:27,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:27,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840787653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:27,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:27,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840787654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:27,663 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/0937a42d2a834c75a571f23066425f78 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0937a42d2a834c75a571f23066425f78 2024-12-10T14:25:27,675 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/B of 3cb281b62d072b2e7312c326c99dffff into 0937a42d2a834c75a571f23066425f78(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
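
The "PressureAwareThroughputController ... average throughput is 0.73 MB/second, slept 0 time(s) ... total limit is 50.00 MB/second" lines earlier show compaction I/O being paced against a bytes-per-second budget: the compactor reports what it has written and sleeps whenever it is running ahead of the allowed rate. A minimal sketch of such a limiter (hypothetical class; the HBase controller additionally adjusts the limit under pressure):

    class SketchThroughputLimiter {
        private final double maxBytesPerSecond;
        private final long startNanos = System.nanoTime();
        private long bytesSoFar = 0;

        SketchThroughputLimiter(double maxBytesPerSecond) { this.maxBytesPerSecond = maxBytesPerSecond; }

        // Call after writing `bytes`; sleeps just long enough to stay under the limit.
        void control(long bytes) throws InterruptedException {
            bytesSoFar += bytes;
            double minSeconds = bytesSoFar / maxBytesPerSecond;           // time the writes should take
            double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
            if (elapsedSeconds < minSeconds) {
                Thread.sleep((long) ((minSeconds - elapsedSeconds) * 1000));
            }
        }
    }

A compactor would call control(...) after each block it writes; with a 50 MB/s budget and the small files in this test the required sleep is zero, matching the "slept 0 time(s)" lines.
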
2024-12-10T14:25:27,676 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:27,676 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/B, priority=12, startTime=1733840727145; duration=0sec 2024-12-10T14:25:27,676 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:27,676 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:B 2024-12-10T14:25:27,804 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/75d45f2ca3524098b9941df42b62eed7 2024-12-10T14:25:27,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/4fdea548b8464ccdaf71e315d9782a58 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/4fdea548b8464ccdaf71e315d9782a58 2024-12-10T14:25:27,824 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/822b8114acb5401bbcff8c9b49648d89 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/822b8114acb5401bbcff8c9b49648d89 2024-12-10T14:25:27,836 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/4fdea548b8464ccdaf71e315d9782a58, entries=200, sequenceid=87, filesize=14.0 K 2024-12-10T14:25:27,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/dd060bf6a1b04479847bd0cf05b829e4 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/dd060bf6a1b04479847bd0cf05b829e4 2024-12-10T14:25:27,840 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/C of 3cb281b62d072b2e7312c326c99dffff into 
822b8114acb5401bbcff8c9b49648d89(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:25:27,840 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:27,840 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/C, priority=12, startTime=1733840727145; duration=0sec 2024-12-10T14:25:27,840 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:27,841 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:C 2024-12-10T14:25:27,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T14:25:27,853 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/dd060bf6a1b04479847bd0cf05b829e4, entries=100, sequenceid=87, filesize=9.4 K 2024-12-10T14:25:27,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/75d45f2ca3524098b9941df42b62eed7 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/75d45f2ca3524098b9941df42b62eed7 2024-12-10T14:25:27,872 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/75d45f2ca3524098b9941df42b62eed7, entries=100, sequenceid=87, filesize=9.4 K 2024-12-10T14:25:27,875 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 3cb281b62d072b2e7312c326c99dffff in 667ms, sequenceid=87, compaction requested=false 2024-12-10T14:25:27,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:27,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
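
From the client's point of view the RegionTooBusyException responses above are retryable; the stock HBase client retries them internally with backoff, and an application issuing its own writes against this table would follow the same pattern. A sketch using the standard client API, with the table, row, and column names taken from this test; the retry count and backoff values are arbitrary, and depending on client configuration the exception may arrive wrapped in a retries-exhausted exception rather than directly as shown:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingPut {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);
                        break;                               // write accepted
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 5) throw e;           // give up after a few tries
                        Thread.sleep(backoffMs);             // let the flush catch up
                        backoffMs *= 2;
                    }
                }
            }
        }
    }

Backing off gives the MemStoreFlusher time to drain the region, after which the same put is accepted, as the successful flush completion above shows.
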
2024-12-10T14:25:27,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-10T14:25:27,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-10T14:25:27,881 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-10T14:25:27,881 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1390 sec 2024-12-10T14:25:27,885 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.1520 sec 2024-12-10T14:25:27,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:27,968 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-10T14:25:27,969 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:27,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:27,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:27,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:27,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:27,970 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:27,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/b67c21a9dc6d488baa67e268e2937986 is 50, key is test_row_0/A:col10/1733840727965/Put/seqid=0 2024-12-10T14:25:27,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:27,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840787977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:27,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:27,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840787981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:28,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741857_1033 (size=14341) 2024-12-10T14:25:28,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:28,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840788084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:28,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:28,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840788086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:28,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:28,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840788086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:28,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:28,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840788087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:28,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:28,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840788091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:28,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:28,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840788289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:28,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:28,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840788289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:28,413 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/b67c21a9dc6d488baa67e268e2937986 2024-12-10T14:25:28,438 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/b2afd298e90b4f8a9803993d35a7d286 is 50, key is test_row_0/B:col10/1733840727965/Put/seqid=0 2024-12-10T14:25:28,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741858_1034 (size=12001) 2024-12-10T14:25:28,484 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/b2afd298e90b4f8a9803993d35a7d286 2024-12-10T14:25:28,501 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/8185a2c5a98c4eac86e3253711af5fa3 is 50, key is test_row_0/C:col10/1733840727965/Put/seqid=0 2024-12-10T14:25:28,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741859_1035 (size=12001) 2024-12-10T14:25:28,533 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/8185a2c5a98c4eac86e3253711af5fa3 2024-12-10T14:25:28,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/b67c21a9dc6d488baa67e268e2937986 as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/b67c21a9dc6d488baa67e268e2937986 2024-12-10T14:25:28,562 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/b67c21a9dc6d488baa67e268e2937986, entries=200, sequenceid=120, filesize=14.0 K 2024-12-10T14:25:28,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/b2afd298e90b4f8a9803993d35a7d286 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b2afd298e90b4f8a9803993d35a7d286 2024-12-10T14:25:28,574 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b2afd298e90b4f8a9803993d35a7d286, entries=150, sequenceid=120, filesize=11.7 K 2024-12-10T14:25:28,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/8185a2c5a98c4eac86e3253711af5fa3 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/8185a2c5a98c4eac86e3253711af5fa3 2024-12-10T14:25:28,589 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/8185a2c5a98c4eac86e3253711af5fa3, entries=150, sequenceid=120, filesize=11.7 K 2024-12-10T14:25:28,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 3cb281b62d072b2e7312c326c99dffff in 622ms, sequenceid=120, compaction requested=true 2024-12-10T14:25:28,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:28,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:25:28,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:28,591 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:28,591 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:28,593 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33797 starting 
at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:28,593 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40817 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:28,593 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/B is initiating minor compaction (all files) 2024-12-10T14:25:28,593 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/A is initiating minor compaction (all files) 2024-12-10T14:25:28,593 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/B in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:28,593 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/A in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:28,593 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0937a42d2a834c75a571f23066425f78, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/dd060bf6a1b04479847bd0cf05b829e4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b2afd298e90b4f8a9803993d35a7d286] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=33.0 K 2024-12-10T14:25:28,593 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/53aae04281384014b6729d4f091e9e65, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/4fdea548b8464ccdaf71e315d9782a58, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/b67c21a9dc6d488baa67e268e2937986] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=39.9 K 2024-12-10T14:25:28,594 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 0937a42d2a834c75a571f23066425f78, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733840725927 2024-12-10T14:25:28,594 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53aae04281384014b6729d4f091e9e65, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733840725927 2024-12-10T14:25:28,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
3cb281b62d072b2e7312c326c99dffff:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:25:28,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:28,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:25:28,596 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting dd060bf6a1b04479847bd0cf05b829e4, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1733840726572 2024-12-10T14:25:28,596 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fdea548b8464ccdaf71e315d9782a58, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1733840726572 2024-12-10T14:25:28,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:28,597 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b67c21a9dc6d488baa67e268e2937986, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733840727318 2024-12-10T14:25:28,597 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting b2afd298e90b4f8a9803993d35a7d286, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733840727318 2024-12-10T14:25:28,619 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#B#compaction#21 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:28,621 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/3dcf522234b9418a9ebf6914804bc5e7 is 50, key is test_row_0/B:col10/1733840727965/Put/seqid=0 2024-12-10T14:25:28,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:28,622 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#A#compaction#22 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:28,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T14:25:28,623 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/c5f9533855704aa39c8fa31fc7d5b469 is 50, key is test_row_0/A:col10/1733840727965/Put/seqid=0 2024-12-10T14:25:28,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:28,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:28,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:28,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:28,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:28,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:28,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/4e871c80de78401887ce5857a68c6f9e is 50, key is test_row_0/A:col10/1733840728620/Put/seqid=0 2024-12-10T14:25:28,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741860_1036 (size=12241) 2024-12-10T14:25:28,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741861_1037 (size=12241) 2024-12-10T14:25:28,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741862_1038 (size=9757) 2024-12-10T14:25:28,677 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/4e871c80de78401887ce5857a68c6f9e 2024-12-10T14:25:28,682 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/3dcf522234b9418a9ebf6914804bc5e7 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/3dcf522234b9418a9ebf6914804bc5e7 2024-12-10T14:25:28,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:28,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840788691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:28,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:28,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840788695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:28,699 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/B of 3cb281b62d072b2e7312c326c99dffff into 3dcf522234b9418a9ebf6914804bc5e7(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:25:28,699 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:28,699 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/B, priority=13, startTime=1733840728591; duration=0sec 2024-12-10T14:25:28,701 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/84a1a97fb2354c28adff0eabe74a4bb1 is 50, key is test_row_0/B:col10/1733840728620/Put/seqid=0 2024-12-10T14:25:28,702 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:28,702 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:B 2024-12-10T14:25:28,702 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:28,712 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33797 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:28,713 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/C is initiating minor compaction (all files) 2024-12-10T14:25:28,713 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/C in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
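[Annotation] The repeated RegionTooBusyException WARN/DEBUG pairs above come from HRegion.checkResources: once a region's memstore passes its blocking limit (hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, 512.0 K in this run), new mutations are rejected until the in-flight flush drains the memstore. The fragment below is a hypothetical configuration sketch, not taken from TestAcidGuarantees itself; the 128 KB flush size and multiplier of 4 are assumed values chosen only because they reproduce the 512 K limit reported in this log, while the two property keys are standard HBase settings.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical sketch: shrink the per-region memstore thresholds so that
// heavy concurrent writes hit the blocking limit quickly, as seen above.
// The concrete values are assumptions; only the keys are standard settings.
public class SmallMemstoreConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches ~128 KB (the default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block client updates once the memstore grows past 4 x flush size,
    // i.e. 512 KB, matching the "Over memstore limit=512.0 K" threshold above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}
```

While the region stays over that limit, each rejected Mutate call produces the WARN from HRegion(5069) followed by the ipc.CallRunner DEBUG line carrying the callId and deadline, which is the pattern repeated throughout this stretch of the log.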
2024-12-10T14:25:28,713 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/822b8114acb5401bbcff8c9b49648d89, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/75d45f2ca3524098b9941df42b62eed7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/8185a2c5a98c4eac86e3253711af5fa3] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=33.0 K 2024-12-10T14:25:28,714 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 822b8114acb5401bbcff8c9b49648d89, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733840725927 2024-12-10T14:25:28,715 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 75d45f2ca3524098b9941df42b62eed7, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1733840726572 2024-12-10T14:25:28,716 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 8185a2c5a98c4eac86e3253711af5fa3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733840727318 2024-12-10T14:25:28,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741863_1039 (size=9757) 2024-12-10T14:25:28,740 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#C#compaction#25 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:28,741 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/e53381c4bba4497dbda1bb7a5fce2294 is 50, key is test_row_0/C:col10/1733840727965/Put/seqid=0 2024-12-10T14:25:28,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741864_1040 (size=12241) 2024-12-10T14:25:28,773 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/e53381c4bba4497dbda1bb7a5fce2294 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/e53381c4bba4497dbda1bb7a5fce2294 2024-12-10T14:25:28,790 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/C of 3cb281b62d072b2e7312c326c99dffff into e53381c4bba4497dbda1bb7a5fce2294(size=12.0 K), total size for store is 12.0 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:25:28,790 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:28,790 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/C, priority=13, startTime=1733840728595; duration=0sec 2024-12-10T14:25:28,791 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:28,791 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:C 2024-12-10T14:25:28,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:28,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840788797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:28,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:28,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840788803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:28,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T14:25:28,847 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-10T14:25:28,849 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:25:28,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-10T14:25:28,853 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:25:28,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T14:25:28,854 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:25:28,854 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:25:28,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T14:25:29,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:29,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840789003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:29,008 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:29,008 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T14:25:29,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:29,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:29,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:29,009 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:29,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840789012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:29,071 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/c5f9533855704aa39c8fa31fc7d5b469 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/c5f9533855704aa39c8fa31fc7d5b469 2024-12-10T14:25:29,083 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/A of 3cb281b62d072b2e7312c326c99dffff into c5f9533855704aa39c8fa31fc7d5b469(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
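[Annotation] RegionTooBusyException extends IOException and is treated as retryable, so writers are expected to back off and resend rather than fail outright. The sketch below is a minimal, hypothetical client-side loop for the case where the exception reaches the caller (for example when a test load generator drives puts directly, or after the client's own retries are exhausted); whether it surfaces directly or wrapped by the client's retry machinery depends on client configuration. The backoff schedule and value are assumptions; the row key test_row_0 and column A:col10 mirror the keys visible in this log.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch of retrying a single put while the region reports it is
// over its memstore blocking limit. Not the test's own code.
public final class BusyRegionRetry {
  public static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long sleepMs = 100;
    for (int attempt = 0; attempt < 5; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        // The region is still flushing/compacting; wait and try again.
        Thread.sleep(sleepMs);
        sleepMs = Math.min(sleepMs * 2, 2000);
      }
    }
    throw new IOException("region still too busy after retries");
  }

  // Illustrative put shaped like the rows seen in this log (test_row_0, A:col10).
  public static Put examplePut() {
    Put p = new Put(Bytes.toBytes("test_row_0"));
    p.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    return p;
  }
}
```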
2024-12-10T14:25:29,083 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:29,083 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/A, priority=13, startTime=1733840728590; duration=0sec 2024-12-10T14:25:29,083 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:29,083 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:A 2024-12-10T14:25:29,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/84a1a97fb2354c28adff0eabe74a4bb1 2024-12-10T14:25:29,149 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/04a17f072bc54e568921782e61228eb7 is 50, key is test_row_0/C:col10/1733840728620/Put/seqid=0 2024-12-10T14:25:29,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T14:25:29,163 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:29,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T14:25:29,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:29,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:29,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:29,165 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741865_1041 (size=9757) 2024-12-10T14:25:29,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/04a17f072bc54e568921782e61228eb7 2024-12-10T14:25:29,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/4e871c80de78401887ce5857a68c6f9e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/4e871c80de78401887ce5857a68c6f9e 2024-12-10T14:25:29,198 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/4e871c80de78401887ce5857a68c6f9e, entries=100, sequenceid=132, filesize=9.5 K 2024-12-10T14:25:29,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/84a1a97fb2354c28adff0eabe74a4bb1 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/84a1a97fb2354c28adff0eabe74a4bb1 2024-12-10T14:25:29,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/84a1a97fb2354c28adff0eabe74a4bb1, entries=100, sequenceid=132, filesize=9.5 K 2024-12-10T14:25:29,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/04a17f072bc54e568921782e61228eb7 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/04a17f072bc54e568921782e61228eb7 2024-12-10T14:25:29,235 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/04a17f072bc54e568921782e61228eb7, entries=100, sequenceid=132, filesize=9.5 K 2024-12-10T14:25:29,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 3cb281b62d072b2e7312c326c99dffff in 615ms, sequenceid=132, compaction requested=false 2024-12-10T14:25:29,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:29,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:29,309 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T14:25:29,310 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:29,310 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:29,310 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:29,310 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:29,310 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:29,310 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:29,317 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:29,318 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/c1c7b2e9a1eb46ee962f5a5310761096 is 50, key is test_row_0/A:col10/1733840728693/Put/seqid=0 2024-12-10T14:25:29,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T14:25:29,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:29,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:29,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:29,320 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:29,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741866_1042 (size=12151) 2024-12-10T14:25:29,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:29,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840789333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:29,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:29,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840789335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:29,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:29,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840789440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:29,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:29,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840789441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:29,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T14:25:29,473 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:29,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T14:25:29,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:29,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:29,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:29,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:29,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,629 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:29,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T14:25:29,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:29,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:29,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:29,630 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:29,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840789643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:29,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:29,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840789646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:29,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/c1c7b2e9a1eb46ee962f5a5310761096 2024-12-10T14:25:29,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/1313997576f64cbba1b9bd6b9fe69e0e is 50, key is test_row_0/B:col10/1733840728693/Put/seqid=0 2024-12-10T14:25:29,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741867_1043 (size=12151) 2024-12-10T14:25:29,784 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:29,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T14:25:29,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:29,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:29,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:29,786 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,939 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:29,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T14:25:29,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:29,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:29,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:29,940 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:29,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:29,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840789948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:29,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:29,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840789951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:29,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T14:25:30,093 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:30,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:30,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840790094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:30,096 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T14:25:30,096 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., hostname=db1d50717577,46699,1733840717757, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, 
server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T14:25:30,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:30,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:30,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:30,097 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:30,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:30,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:30,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:30,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840790107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:30,109 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:30,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840790107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:30,109 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4175 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., hostname=db1d50717577,46699,1733840717757, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T14:25:30,110 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., hostname=db1d50717577,46699,1733840717757, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T14:25:30,174 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/1313997576f64cbba1b9bd6b9fe69e0e 2024-12-10T14:25:30,191 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/e5169fb015304f1d9b74aed33ec15ea8 is 50, key is test_row_0/C:col10/1733840728693/Put/seqid=0 2024-12-10T14:25:30,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741868_1044 (size=12151) 2024-12-10T14:25:30,202 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/e5169fb015304f1d9b74aed33ec15ea8 2024-12-10T14:25:30,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/c1c7b2e9a1eb46ee962f5a5310761096 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/c1c7b2e9a1eb46ee962f5a5310761096 2024-12-10T14:25:30,223 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/c1c7b2e9a1eb46ee962f5a5310761096, entries=150, sequenceid=160, filesize=11.9 K 2024-12-10T14:25:30,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/1313997576f64cbba1b9bd6b9fe69e0e as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/1313997576f64cbba1b9bd6b9fe69e0e 2024-12-10T14:25:30,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/1313997576f64cbba1b9bd6b9fe69e0e, entries=150, sequenceid=160, filesize=11.9 K 2024-12-10T14:25:30,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/e5169fb015304f1d9b74aed33ec15ea8 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/e5169fb015304f1d9b74aed33ec15ea8 2024-12-10T14:25:30,252 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:30,253 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T14:25:30,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:30,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:30,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:30,253 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:30,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:30,254 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/e5169fb015304f1d9b74aed33ec15ea8, entries=150, sequenceid=160, filesize=11.9 K 2024-12-10T14:25:30,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:30,256 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 3cb281b62d072b2e7312c326c99dffff in 946ms, sequenceid=160, compaction requested=true 2024-12-10T14:25:30,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:30,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:25:30,256 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:30,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:30,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:25:30,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:30,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:25:30,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:30,256 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:30,260 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:30,260 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/A is initiating minor compaction (all files) 2024-12-10T14:25:30,260 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/A in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
2024-12-10T14:25:30,260 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/c5f9533855704aa39c8fa31fc7d5b469, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/4e871c80de78401887ce5857a68c6f9e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/c1c7b2e9a1eb46ee962f5a5310761096] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=33.3 K 2024-12-10T14:25:30,261 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:30,261 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/B is initiating minor compaction (all files) 2024-12-10T14:25:30,261 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/B in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:30,261 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/3dcf522234b9418a9ebf6914804bc5e7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/84a1a97fb2354c28adff0eabe74a4bb1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/1313997576f64cbba1b9bd6b9fe69e0e] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=33.3 K 2024-12-10T14:25:30,262 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5f9533855704aa39c8fa31fc7d5b469, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733840727318 2024-12-10T14:25:30,263 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 3dcf522234b9418a9ebf6914804bc5e7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733840727318 2024-12-10T14:25:30,263 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e871c80de78401887ce5857a68c6f9e, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733840728610 2024-12-10T14:25:30,263 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 84a1a97fb2354c28adff0eabe74a4bb1, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733840728610 2024-12-10T14:25:30,264 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting c1c7b2e9a1eb46ee962f5a5310761096, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733840728688 2024-12-10T14:25:30,264 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 1313997576f64cbba1b9bd6b9fe69e0e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733840728688 2024-12-10T14:25:30,286 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#B#compaction#30 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:30,287 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/9e95cb791ff54c66b9b2b5c5433d5f6f is 50, key is test_row_0/B:col10/1733840728693/Put/seqid=0 2024-12-10T14:25:30,290 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#A#compaction#31 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:30,291 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/cc8c0b710ff44e51a1883116be73a74c is 50, key is test_row_0/A:col10/1733840728693/Put/seqid=0 2024-12-10T14:25:30,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741869_1045 (size=12493) 2024-12-10T14:25:30,336 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/9e95cb791ff54c66b9b2b5c5433d5f6f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/9e95cb791ff54c66b9b2b5c5433d5f6f 2024-12-10T14:25:30,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741870_1046 (size=12493) 2024-12-10T14:25:30,350 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/B of 3cb281b62d072b2e7312c326c99dffff into 9e95cb791ff54c66b9b2b5c5433d5f6f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:25:30,351 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:30,351 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/B, priority=13, startTime=1733840730256; duration=0sec 2024-12-10T14:25:30,351 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:30,351 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:B 2024-12-10T14:25:30,351 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/cc8c0b710ff44e51a1883116be73a74c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/cc8c0b710ff44e51a1883116be73a74c 2024-12-10T14:25:30,352 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:30,356 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:30,356 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/C is initiating minor compaction (all files) 2024-12-10T14:25:30,356 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/C in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
2024-12-10T14:25:30,356 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/e53381c4bba4497dbda1bb7a5fce2294, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/04a17f072bc54e568921782e61228eb7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/e5169fb015304f1d9b74aed33ec15ea8] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=33.3 K 2024-12-10T14:25:30,359 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting e53381c4bba4497dbda1bb7a5fce2294, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733840727318 2024-12-10T14:25:30,360 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 04a17f072bc54e568921782e61228eb7, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733840728610 2024-12-10T14:25:30,361 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting e5169fb015304f1d9b74aed33ec15ea8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733840728688 2024-12-10T14:25:30,363 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/A of 3cb281b62d072b2e7312c326c99dffff into cc8c0b710ff44e51a1883116be73a74c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:25:30,363 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:30,363 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/A, priority=13, startTime=1733840730256; duration=0sec 2024-12-10T14:25:30,363 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:30,363 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:A 2024-12-10T14:25:30,377 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#C#compaction#32 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:30,377 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/285590ad83964ec98bf8b9c09bb5a70e is 50, key is test_row_0/C:col10/1733840728693/Put/seqid=0 2024-12-10T14:25:30,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741871_1047 (size=12493) 2024-12-10T14:25:30,405 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/285590ad83964ec98bf8b9c09bb5a70e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/285590ad83964ec98bf8b9c09bb5a70e 2024-12-10T14:25:30,407 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:30,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T14:25:30,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:30,408 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T14:25:30,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:30,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:30,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:30,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:30,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:30,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:30,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/50298a230b584fb198f5a8f2167009e6 is 50, key is test_row_1/A:col10/1733840729329/Put/seqid=0 2024-12-10T14:25:30,425 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/C of 3cb281b62d072b2e7312c326c99dffff into 285590ad83964ec98bf8b9c09bb5a70e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:25:30,425 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:30,425 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/C, priority=13, startTime=1733840730256; duration=0sec 2024-12-10T14:25:30,425 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:30,425 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:C 2024-12-10T14:25:30,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741872_1048 (size=9757) 2024-12-10T14:25:30,446 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/50298a230b584fb198f5a8f2167009e6 2024-12-10T14:25:30,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:30,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
as already flushing 2024-12-10T14:25:30,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/377aeea3def04c7386f03d34e8b83134 is 50, key is test_row_1/B:col10/1733840729329/Put/seqid=0 2024-12-10T14:25:30,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741873_1049 (size=9757) 2024-12-10T14:25:30,492 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/377aeea3def04c7386f03d34e8b83134 2024-12-10T14:25:30,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:30,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840790503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:30,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:30,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840790507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:30,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/120ae275a2cd44f3b59c414329e3b445 is 50, key is test_row_1/C:col10/1733840729329/Put/seqid=0 2024-12-10T14:25:30,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741874_1050 (size=9757) 2024-12-10T14:25:30,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:30,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840790608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:30,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:30,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840790611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:30,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:30,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840790812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:30,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:30,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840790814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:30,944 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/120ae275a2cd44f3b59c414329e3b445 2024-12-10T14:25:30,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/50298a230b584fb198f5a8f2167009e6 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/50298a230b584fb198f5a8f2167009e6 2024-12-10T14:25:30,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T14:25:30,961 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/50298a230b584fb198f5a8f2167009e6, entries=100, sequenceid=173, filesize=9.5 K 2024-12-10T14:25:30,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/377aeea3def04c7386f03d34e8b83134 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/377aeea3def04c7386f03d34e8b83134 2024-12-10T14:25:30,969 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/377aeea3def04c7386f03d34e8b83134, entries=100, sequenceid=173, filesize=9.5 K 2024-12-10T14:25:30,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/120ae275a2cd44f3b59c414329e3b445 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/120ae275a2cd44f3b59c414329e3b445 2024-12-10T14:25:30,979 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/120ae275a2cd44f3b59c414329e3b445, entries=100, sequenceid=173, filesize=9.5 K 2024-12-10T14:25:30,981 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 3cb281b62d072b2e7312c326c99dffff in 573ms, sequenceid=173, compaction requested=false 2024-12-10T14:25:30,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:30,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:30,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-10T14:25:30,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-10T14:25:30,985 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-10T14:25:30,985 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1290 sec 2024-12-10T14:25:30,988 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 2.1360 sec 2024-12-10T14:25:31,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:31,160 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-10T14:25:31,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:31,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:31,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:31,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:31,161 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:31,161 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:31,169 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/2b032448d140492ab1e986303c79cef6 is 50, key is test_row_0/A:col10/1733840730505/Put/seqid=0 2024-12-10T14:25:31,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:31,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840791180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:31,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:31,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840791180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:31,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741875_1051 (size=12151) 2024-12-10T14:25:31,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:31,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840791294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:31,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:31,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840791294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:31,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:31,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840791497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:31,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:31,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840791498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:31,597 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/2b032448d140492ab1e986303c79cef6 2024-12-10T14:25:31,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/1780548396a24aa69b9d4ecd139a0ec2 is 50, key is test_row_0/B:col10/1733840730505/Put/seqid=0 2024-12-10T14:25:31,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741876_1052 (size=12151) 2024-12-10T14:25:31,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:31,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840791801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:31,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:31,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840791802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:32,048 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/1780548396a24aa69b9d4ecd139a0ec2 2024-12-10T14:25:32,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/0dbec229c8f74ea0b60f65e3e9e535b4 is 50, key is test_row_0/C:col10/1733840730505/Put/seqid=0 2024-12-10T14:25:32,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741877_1053 (size=12151) 2024-12-10T14:25:32,069 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/0dbec229c8f74ea0b60f65e3e9e535b4 2024-12-10T14:25:32,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/2b032448d140492ab1e986303c79cef6 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/2b032448d140492ab1e986303c79cef6 2024-12-10T14:25:32,096 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/2b032448d140492ab1e986303c79cef6, entries=150, sequenceid=201, filesize=11.9 K 2024-12-10T14:25:32,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/1780548396a24aa69b9d4ecd139a0ec2 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/1780548396a24aa69b9d4ecd139a0ec2 2024-12-10T14:25:32,105 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/1780548396a24aa69b9d4ecd139a0ec2, entries=150, sequenceid=201, filesize=11.9 K 2024-12-10T14:25:32,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/0dbec229c8f74ea0b60f65e3e9e535b4 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/0dbec229c8f74ea0b60f65e3e9e535b4 2024-12-10T14:25:32,115 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/0dbec229c8f74ea0b60f65e3e9e535b4, entries=150, sequenceid=201, filesize=11.9 K 2024-12-10T14:25:32,117 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 3cb281b62d072b2e7312c326c99dffff in 957ms, sequenceid=201, compaction requested=true 2024-12-10T14:25:32,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:32,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:25:32,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:32,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:25:32,117 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:32,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:32,118 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:32,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:25:32,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:32,121 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:32,121 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 
3cb281b62d072b2e7312c326c99dffff/B is initiating minor compaction (all files) 2024-12-10T14:25:32,121 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/B in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:32,121 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/9e95cb791ff54c66b9b2b5c5433d5f6f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/377aeea3def04c7386f03d34e8b83134, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/1780548396a24aa69b9d4ecd139a0ec2] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=33.6 K 2024-12-10T14:25:32,122 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:32,122 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/A is initiating minor compaction (all files) 2024-12-10T14:25:32,122 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/A in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
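The repeated "Over memstore limit=512.0 K" warnings above are HRegion.checkResources rejecting writes because the region's combined memstore has grown past its blocking size, which is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; writes fail with RegionTooBusyException until the in-flight flush shrinks the memstore again. The sketch below only shows those two knobs and how they combine; the 128 KB flush size is an assumption picked so the product matches the 512 K limit in this log (the test's real settings may differ, and the production default flush size is 128 MB).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // assumed values: 128 KB flush size x default multiplier of 4 = 512 KB
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("writes rejected above ~" + blockingLimit + " bytes per region");
    }
}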
2024-12-10T14:25:32,122 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e95cb791ff54c66b9b2b5c5433d5f6f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733840728688 2024-12-10T14:25:32,122 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/cc8c0b710ff44e51a1883116be73a74c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/50298a230b584fb198f5a8f2167009e6, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/2b032448d140492ab1e986303c79cef6] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=33.6 K 2024-12-10T14:25:32,123 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 377aeea3def04c7386f03d34e8b83134, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733840729329 2024-12-10T14:25:32,123 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc8c0b710ff44e51a1883116be73a74c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733840728688 2024-12-10T14:25:32,123 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 1780548396a24aa69b9d4ecd139a0ec2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1733840730497 2024-12-10T14:25:32,124 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50298a230b584fb198f5a8f2167009e6, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733840729329 2024-12-10T14:25:32,125 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b032448d140492ab1e986303c79cef6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1733840730497 2024-12-10T14:25:32,143 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#A#compaction#39 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:32,144 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/a2f9d609cdb74fefb2566dc4aba5942d is 50, key is test_row_0/A:col10/1733840730505/Put/seqid=0 2024-12-10T14:25:32,146 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#B#compaction#40 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:32,147 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/b09d8b8355db4b5bbb52e6670c6d48ed is 50, key is test_row_0/B:col10/1733840730505/Put/seqid=0 2024-12-10T14:25:32,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741878_1054 (size=12595) 2024-12-10T14:25:32,179 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/a2f9d609cdb74fefb2566dc4aba5942d as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/a2f9d609cdb74fefb2566dc4aba5942d 2024-12-10T14:25:32,199 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/A of 3cb281b62d072b2e7312c326c99dffff into a2f9d609cdb74fefb2566dc4aba5942d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:25:32,199 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:32,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741879_1055 (size=12595) 2024-12-10T14:25:32,199 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/A, priority=13, startTime=1733840732117; duration=0sec 2024-12-10T14:25:32,199 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:32,199 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:A 2024-12-10T14:25:32,200 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:32,205 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:32,205 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/C is initiating minor compaction (all files) 2024-12-10T14:25:32,205 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/C in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
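The "total limit is 50.00 MB/second" figure in the PressureAwareThroughputController entries is the throttle applied to compaction writes. The controller floats its limit between a lower and a higher bound depending on flush and store-file pressure, and with no pressure it sits at the lower bound; 50 MB/s matches the commonly documented default for that bound. The property names below are taken from the HBase 2.x configuration docs and the values are illustrative, not read from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // bounds for the pressure-aware compaction throughput controller (bytes/sec)
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);

        System.out.println("compaction writes throttled between "
            + conf.getLong("hbase.hstore.compaction.throughput.lower.bound", 0) + " and "
            + conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 0) + " bytes/sec");
    }
}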
2024-12-10T14:25:32,206 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/285590ad83964ec98bf8b9c09bb5a70e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/120ae275a2cd44f3b59c414329e3b445, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/0dbec229c8f74ea0b60f65e3e9e535b4] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=33.6 K 2024-12-10T14:25:32,208 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 285590ad83964ec98bf8b9c09bb5a70e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733840728688 2024-12-10T14:25:32,210 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 120ae275a2cd44f3b59c414329e3b445, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733840729329 2024-12-10T14:25:32,211 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/b09d8b8355db4b5bbb52e6670c6d48ed as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b09d8b8355db4b5bbb52e6670c6d48ed 2024-12-10T14:25:32,212 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0dbec229c8f74ea0b60f65e3e9e535b4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1733840730497 2024-12-10T14:25:32,229 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/B of 3cb281b62d072b2e7312c326c99dffff into b09d8b8355db4b5bbb52e6670c6d48ed(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
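The HRegionFileSystem "Committing .../.tmp/<family>/<file> as .../<family>/<file>" entries above and below mark the point where a finished flush or compaction output becomes part of the store: at the filesystem level it is a rename out of the region's .tmp directory into the column-family directory, after which readers can see the new HFile and the replaced inputs can be archived. The snippet below is only an HDFS-level illustration of that move with placeholder paths; HBase's HRegionFileSystem performs extra validation and bookkeeping around it.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitStoreFileSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // placeholder paths; the real ones are the long hdfs://localhost:38801/... paths in the log
        Path tmp = new Path("/hbase/data/default/TestAcidGuarantees/<region>/.tmp/C/<hfile>");
        Path dst = new Path("/hbase/data/default/TestAcidGuarantees/<region>/C/<hfile>");

        // the new HFile only becomes visible to the store once the rename succeeds
        if (!fs.rename(tmp, dst)) {
            throw new IOException("failed to commit " + tmp + " to " + dst);
        }
    }
}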
2024-12-10T14:25:32,229 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:32,230 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/B, priority=13, startTime=1733840732117; duration=0sec 2024-12-10T14:25:32,230 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:32,230 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:B 2024-12-10T14:25:32,235 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#C#compaction#41 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:32,236 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/01251027cd8748959a0fa6beb01e2753 is 50, key is test_row_0/C:col10/1733840730505/Put/seqid=0 2024-12-10T14:25:32,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741880_1056 (size=12595) 2024-12-10T14:25:32,259 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/01251027cd8748959a0fa6beb01e2753 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/01251027cd8748959a0fa6beb01e2753 2024-12-10T14:25:32,267 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/C of 3cb281b62d072b2e7312c326c99dffff into 01251027cd8748959a0fa6beb01e2753(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
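Aside: in the entries that follow, client puts start bouncing with RegionTooBusyException because the region's memstore has reached its blocking limit (512.0 K in this test harness) while flushes and compactions drain it. The stock HBase client already treats this as a retryable IOException under its own retry settings; the sketch below only makes the backoff pattern explicit for a hand-rolled writer. The class name, backoff values, and cell value are illustrative; the table, row, family, and qualifier are taken from the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical helper: retry a put with exponential backoff while the region reports it is too busy.
    public class BusyRegionRetrySketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;                                  // illustrative starting backoff
                for (int attempt = 0; attempt < 10; attempt++) {
                    try {
                        table.put(put);
                        return;                                        // write accepted
                    } catch (RegionTooBusyException busy) {
                        Thread.sleep(backoffMs);                       // let flush/compaction catch up
                        backoffMs = Math.min(backoffMs * 2, 5_000);
                    }
                }
                throw new java.io.IOException("region stayed too busy after 10 attempts");
            }
        }
    }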
2024-12-10T14:25:32,268 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:32,268 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/C, priority=13, startTime=1733840732118; duration=0sec 2024-12-10T14:25:32,268 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:32,268 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:C 2024-12-10T14:25:32,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:32,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T14:25:32,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:32,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:32,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:32,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:32,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:32,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:32,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/759c557d399b48f0b7ddebc02190eb84 is 50, key is test_row_0/A:col10/1733840731176/Put/seqid=0 2024-12-10T14:25:32,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741881_1057 (size=12151) 2024-12-10T14:25:32,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:32,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840792367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:32,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:32,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840792367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:32,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:32,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840792471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:32,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:32,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840792475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:32,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:32,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840792675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:32,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:32,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840792678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:32,751 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/759c557d399b48f0b7ddebc02190eb84 2024-12-10T14:25:32,768 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/7b60d09a503a4ea8a67b39cd499980cf is 50, key is test_row_0/B:col10/1733840731176/Put/seqid=0 2024-12-10T14:25:32,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741882_1058 (size=12151) 2024-12-10T14:25:32,791 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/7b60d09a503a4ea8a67b39cd499980cf 2024-12-10T14:25:32,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/22a6869916a940c9a610df976d74344e is 50, key is test_row_0/C:col10/1733840731176/Put/seqid=0 2024-12-10T14:25:32,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741883_1059 (size=12151) 2024-12-10T14:25:32,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/22a6869916a940c9a610df976d74344e 2024-12-10T14:25:32,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/759c557d399b48f0b7ddebc02190eb84 as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/759c557d399b48f0b7ddebc02190eb84 2024-12-10T14:25:32,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/759c557d399b48f0b7ddebc02190eb84, entries=150, sequenceid=215, filesize=11.9 K 2024-12-10T14:25:32,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/7b60d09a503a4ea8a67b39cd499980cf as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/7b60d09a503a4ea8a67b39cd499980cf 2024-12-10T14:25:32,846 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/7b60d09a503a4ea8a67b39cd499980cf, entries=150, sequenceid=215, filesize=11.9 K 2024-12-10T14:25:32,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/22a6869916a940c9a610df976d74344e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/22a6869916a940c9a610df976d74344e 2024-12-10T14:25:32,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/22a6869916a940c9a610df976d74344e, entries=150, sequenceid=215, filesize=11.9 K 2024-12-10T14:25:32,862 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 3cb281b62d072b2e7312c326c99dffff in 549ms, sequenceid=215, compaction requested=false 2024-12-10T14:25:32,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:32,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T14:25:32,961 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-10T14:25:32,962 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:25:32,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-10T14:25:32,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T14:25:32,966 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): 
pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:25:32,967 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:25:32,967 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:25:32,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:32,982 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T14:25:32,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:32,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:32,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:32,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:32,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:32,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:32,989 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/cbfd671a78524d7f8b2f11ebcb1f55ba is 50, key is test_row_0/A:col10/1733840732981/Put/seqid=0 2024-12-10T14:25:32,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741884_1060 (size=14541) 2024-12-10T14:25:33,002 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:33,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840793000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:33,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:33,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840793001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:33,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T14:25:33,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:33,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840793104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:33,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:33,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840793105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:33,119 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:33,120 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T14:25:33,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:33,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:33,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:33,120 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
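Aside: the pid=18/19 FlushTableProcedure activity above comes from the test calling the admin flush API while MemStoreFlusher.0 is still mid-flush, so the dispatched FlushRegionCallable keeps failing with "Unable to complete flush ... as already flushing" and the master re-dispatches it until the in-flight flush finishes. A minimal sketch of that admin-side call follows; the table name is taken from the log, the class name is made up for this note.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Hypothetical sketch of the admin-side flush that drives the FlushTableProcedure seen here.
    public class AdminFlushSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Returns once the flush procedure completes (the client waits on the procedure
                // future, as the HBaseAdmin$TableFuture "Operation: FLUSH ... completed" line shows).
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }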
2024-12-10T14:25:33,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:33,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:33,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T14:25:33,273 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:33,274 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T14:25:33,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:33,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:33,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:33,274 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:33,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:33,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:33,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:33,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840793307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:33,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:33,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840793311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:33,398 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/cbfd671a78524d7f8b2f11ebcb1f55ba 2024-12-10T14:25:33,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/bf440981ccb34b7097205797c9ea5383 is 50, key is test_row_0/B:col10/1733840732981/Put/seqid=0 2024-12-10T14:25:33,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741885_1061 (size=12151) 2024-12-10T14:25:33,427 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:33,428 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T14:25:33,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:33,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:33,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:33,428 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:33,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:33,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:33,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T14:25:33,581 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:33,581 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T14:25:33,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:33,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:33,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:33,581 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:33,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:33,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:33,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:33,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840793609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:33,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:33,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840793615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:33,734 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:33,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T14:25:33,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
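[editor's note] The RegionTooBusyException warnings above come from HRegion.checkResources rejecting Mutate RPCs once the region's memstore passes its blocking limit (512.0 K here, apparently because the test runs with a very small flush size). The row, family and qualifier below (test_row_0, A, col10) are taken from the log; the class name, value and backoff numbers are illustrative assumptions, and the stock HBase client normally retries such exceptions on its own — this sketch only makes the behaviour visible.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;            // arbitrary starting backoff (assumption)
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);              // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);     // give the in-flight flush time to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}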
2024-12-10T14:25:33,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:33,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:33,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:33,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:33,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
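[editor's note] The block above repeats because the master keeps re-dispatching FlushRegionProcedure pid=19 while the region server is still inside its own memstore flush, so FlushRegionCallable bails out with "Unable to complete flush ... as already flushing" and the master schedules another attempt. From the caller's side the whole exchange is normally driven by a single Admin flush request; a minimal sketch under that assumption (this is not the code of TestAcidGuarantees itself) is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush-table procedure on the master, which fans out one
      // flush-region procedure per region (pid=18 -> pid=19 in this log).
      // If a region is already flushing, the region-side callable fails and
      // the master simply retries until the in-progress flush completes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}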
2024-12-10T14:25:33,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/bf440981ccb34b7097205797c9ea5383 2024-12-10T14:25:33,830 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/254eca82a47f471daa6dff57316b3f09 is 50, key is test_row_0/C:col10/1733840732981/Put/seqid=0 2024-12-10T14:25:33,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741886_1062 (size=12151) 2024-12-10T14:25:33,855 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/254eca82a47f471daa6dff57316b3f09 2024-12-10T14:25:33,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/cbfd671a78524d7f8b2f11ebcb1f55ba as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/cbfd671a78524d7f8b2f11ebcb1f55ba 2024-12-10T14:25:33,870 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/cbfd671a78524d7f8b2f11ebcb1f55ba, entries=200, sequenceid=241, filesize=14.2 K 2024-12-10T14:25:33,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/bf440981ccb34b7097205797c9ea5383 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/bf440981ccb34b7097205797c9ea5383 2024-12-10T14:25:33,880 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/bf440981ccb34b7097205797c9ea5383, entries=150, sequenceid=241, filesize=11.9 K 2024-12-10T14:25:33,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/254eca82a47f471daa6dff57316b3f09 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/254eca82a47f471daa6dff57316b3f09 2024-12-10T14:25:33,888 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:33,888 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T14:25:33,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:33,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:33,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:33,889 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:33,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:33,890 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/254eca82a47f471daa6dff57316b3f09, entries=150, sequenceid=241, filesize=11.9 K 2024-12-10T14:25:33,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
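[editor's note] Interleaved with the procedure retries, MemStoreFlusher.0 finishes the earlier flush: each column family's data is written to a temporary HFile under .tmp/ and then committed into the family directory (A/, B/, C/), after which HStore reports the new file's entry count, sequence id and size. Listing a region's family directories with the plain Hadoop FileSystem API shows that layout; the region path below is copied from the log and would differ on any other run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStoreFilesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Region directory as it appears in the log above (test-local HDFS).
    Path regionDir = new Path("hdfs://localhost:38801/user/jenkins/test-data/"
        + "0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/"
        + "3cb281b62d072b2e7312c326c99dffff");
    FileSystem fs = regionDir.getFileSystem(conf);
    for (String family : new String[] {"A", "B", "C"}) {
      for (FileStatus hfile : fs.listStatus(new Path(regionDir, family))) {
        System.out.println(family + ": " + hfile.getPath().getName()
            + " (" + hfile.getLen() + " bytes)");
      }
    }
  }
}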
2024-12-10T14:25:33,892 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 3cb281b62d072b2e7312c326c99dffff in 910ms, sequenceid=241, compaction requested=true 2024-12-10T14:25:33,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:33,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:25:33,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:33,892 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:33,892 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:33,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:25:33,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:33,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:25:33,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:33,895 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39287 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:33,895 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/A is initiating minor compaction (all files) 2024-12-10T14:25:33,895 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:33,895 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/A in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
2024-12-10T14:25:33,895 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/B is initiating minor compaction (all files) 2024-12-10T14:25:33,895 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/a2f9d609cdb74fefb2566dc4aba5942d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/759c557d399b48f0b7ddebc02190eb84, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/cbfd671a78524d7f8b2f11ebcb1f55ba] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=38.4 K 2024-12-10T14:25:33,895 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/B in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:33,895 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b09d8b8355db4b5bbb52e6670c6d48ed, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/7b60d09a503a4ea8a67b39cd499980cf, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/bf440981ccb34b7097205797c9ea5383] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=36.0 K 2024-12-10T14:25:33,896 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2f9d609cdb74fefb2566dc4aba5942d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1733840730497 2024-12-10T14:25:33,896 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting b09d8b8355db4b5bbb52e6670c6d48ed, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1733840730497 2024-12-10T14:25:33,896 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 759c557d399b48f0b7ddebc02190eb84, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733840731176 2024-12-10T14:25:33,896 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b60d09a503a4ea8a67b39cd499980cf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733840731176 2024-12-10T14:25:33,897 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbfd671a78524d7f8b2f11ebcb1f55ba, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733840732362 2024-12-10T14:25:33,897 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] 
compactions.Compactor(224): Compacting bf440981ccb34b7097205797c9ea5383, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733840732362 2024-12-10T14:25:33,910 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#A#compaction#48 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:33,911 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/96b5cd447187452b9ce1127802604f95 is 50, key is test_row_0/A:col10/1733840732981/Put/seqid=0 2024-12-10T14:25:33,912 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#B#compaction#49 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:33,913 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/ac10ea0378024dfbabdaeb67eae51c99 is 50, key is test_row_0/B:col10/1733840732981/Put/seqid=0 2024-12-10T14:25:33,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741888_1064 (size=12697) 2024-12-10T14:25:33,949 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/ac10ea0378024dfbabdaeb67eae51c99 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/ac10ea0378024dfbabdaeb67eae51c99 2024-12-10T14:25:33,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741887_1063 (size=12697) 2024-12-10T14:25:33,959 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/B of 3cb281b62d072b2e7312c326c99dffff into ac10ea0378024dfbabdaeb67eae51c99(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
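[editor's note] At this point the flushes have left each store with three HFiles, so CompactSplit queues minor compactions: ExploringCompactionPolicy selects all three eligible files per store (~36-39 KB total) and the throughput-controlled compactor rewrites them into a single ~12.4 K file. The trigger is the per-store eligible-file count (hbase.hstore.compaction.min, typically 3); a compaction can also be requested explicitly through the Admin API, as in the hedged sketch below — not something this test does, just an illustration of the same machinery.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Ask for a minor compaction of one column family; the region server's
      // compaction policy still decides which store files actually get picked.
      admin.compact(table, Bytes.toBytes("A"));
      // Or force every store file in the table to be rewritten:
      admin.majorCompact(table);
    }
  }
}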
2024-12-10T14:25:33,959 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:33,959 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/B, priority=13, startTime=1733840733892; duration=0sec 2024-12-10T14:25:33,960 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:33,960 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:B 2024-12-10T14:25:33,960 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:33,962 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:33,962 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/C is initiating minor compaction (all files) 2024-12-10T14:25:33,962 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/C in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:33,962 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/01251027cd8748959a0fa6beb01e2753, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/22a6869916a940c9a610df976d74344e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/254eca82a47f471daa6dff57316b3f09] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=36.0 K 2024-12-10T14:25:33,963 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 01251027cd8748959a0fa6beb01e2753, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1733840730497 2024-12-10T14:25:33,964 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 22a6869916a940c9a610df976d74344e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733840731176 2024-12-10T14:25:33,964 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 254eca82a47f471daa6dff57316b3f09, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733840732362 2024-12-10T14:25:33,987 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
3cb281b62d072b2e7312c326c99dffff#C#compaction#50 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:33,988 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/5e3207c903e04b49ab003b5d14c99aa8 is 50, key is test_row_0/C:col10/1733840732981/Put/seqid=0 2024-12-10T14:25:34,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741889_1065 (size=12697) 2024-12-10T14:25:34,043 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:34,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T14:25:34,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:34,044 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-10T14:25:34,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:34,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:34,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:34,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:34,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:34,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:34,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/6d06c88a831a4da485e3e7aacb6a54b4 is 50, key is test_row_0/A:col10/1733840732999/Put/seqid=0 2024-12-10T14:25:34,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T14:25:34,086 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741890_1066 (size=12151) 2024-12-10T14:25:34,087 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/6d06c88a831a4da485e3e7aacb6a54b4 2024-12-10T14:25:34,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/bd87b6023c834aea9e221370411545f9 is 50, key is test_row_0/B:col10/1733840732999/Put/seqid=0 2024-12-10T14:25:34,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:34,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:34,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741891_1067 (size=12151) 2024-12-10T14:25:34,131 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/bd87b6023c834aea9e221370411545f9 2024-12-10T14:25:34,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/f225c747f7f84603a71be7073cdd9d38 is 50, key is test_row_0/C:col10/1733840732999/Put/seqid=0 2024-12-10T14:25:34,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741892_1068 (size=12151) 2024-12-10T14:25:34,167 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/f225c747f7f84603a71be7073cdd9d38 2024-12-10T14:25:34,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840794161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840794163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840794166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840794169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840794169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/6d06c88a831a4da485e3e7aacb6a54b4 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/6d06c88a831a4da485e3e7aacb6a54b4 2024-12-10T14:25:34,185 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/6d06c88a831a4da485e3e7aacb6a54b4, entries=150, sequenceid=252, filesize=11.9 K 2024-12-10T14:25:34,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/bd87b6023c834aea9e221370411545f9 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/bd87b6023c834aea9e221370411545f9 2024-12-10T14:25:34,193 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/bd87b6023c834aea9e221370411545f9, entries=150, sequenceid=252, filesize=11.9 K 2024-12-10T14:25:34,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/f225c747f7f84603a71be7073cdd9d38 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/f225c747f7f84603a71be7073cdd9d38 2024-12-10T14:25:34,202 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/f225c747f7f84603a71be7073cdd9d38, entries=150, sequenceid=252, filesize=11.9 K 2024-12-10T14:25:34,203 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 3cb281b62d072b2e7312c326c99dffff in 159ms, sequenceid=252, compaction requested=false 2024-12-10T14:25:34,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:34,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:34,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-10T14:25:34,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-10T14:25:34,209 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-10T14:25:34,209 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2390 sec 2024-12-10T14:25:34,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.2490 sec 2024-12-10T14:25:34,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:34,275 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-10T14:25:34,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:34,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:34,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:34,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:34,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:34,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:34,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840794280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840794277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840794281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840794283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840794284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,291 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/fd5513509ad04e3dbbb2158be3b9efec is 50, key is test_row_0/A:col10/1733840734274/Put/seqid=0 2024-12-10T14:25:34,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741893_1069 (size=14741) 2024-12-10T14:25:34,364 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/96b5cd447187452b9ce1127802604f95 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/96b5cd447187452b9ce1127802604f95 2024-12-10T14:25:34,372 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/A of 3cb281b62d072b2e7312c326c99dffff into 96b5cd447187452b9ce1127802604f95(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:25:34,372 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:34,372 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/A, priority=13, startTime=1733840733892; duration=0sec 2024-12-10T14:25:34,372 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:34,372 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:A 2024-12-10T14:25:34,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840794386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840794386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840794386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840794389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840794389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,436 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/5e3207c903e04b49ab003b5d14c99aa8 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/5e3207c903e04b49ab003b5d14c99aa8 2024-12-10T14:25:34,444 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/C of 3cb281b62d072b2e7312c326c99dffff into 5e3207c903e04b49ab003b5d14c99aa8(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:25:34,444 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:34,444 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/C, priority=13, startTime=1733840733894; duration=0sec 2024-12-10T14:25:34,444 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:34,444 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:C 2024-12-10T14:25:34,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840794589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840794590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,591 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840794590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840794592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840794592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=281 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/fd5513509ad04e3dbbb2158be3b9efec 2024-12-10T14:25:34,720 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/f2bddac2595840ce8d20c5d3be04fe69 is 50, key is test_row_0/B:col10/1733840734274/Put/seqid=0 2024-12-10T14:25:34,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741894_1070 (size=12301) 2024-12-10T14:25:34,732 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=281 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/f2bddac2595840ce8d20c5d3be04fe69 2024-12-10T14:25:34,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/c0096be60c894b2ea0aedb1ff54f195a is 50, key is test_row_0/C:col10/1733840734274/Put/seqid=0 2024-12-10T14:25:34,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741895_1071 (size=12301) 2024-12-10T14:25:34,893 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840794892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840794893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840794894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840794896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:34,898 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:34,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840794896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:35,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T14:25:35,070 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-10T14:25:35,071 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:25:35,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-10T14:25:35,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T14:25:35,073 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:25:35,075 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:25:35,075 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:25:35,168 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=281 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/c0096be60c894b2ea0aedb1ff54f195a 2024-12-10T14:25:35,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T14:25:35,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/fd5513509ad04e3dbbb2158be3b9efec as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/fd5513509ad04e3dbbb2158be3b9efec 2024-12-10T14:25:35,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/fd5513509ad04e3dbbb2158be3b9efec, entries=200, sequenceid=281, filesize=14.4 K 2024-12-10T14:25:35,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/f2bddac2595840ce8d20c5d3be04fe69 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/f2bddac2595840ce8d20c5d3be04fe69 2024-12-10T14:25:35,197 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/f2bddac2595840ce8d20c5d3be04fe69, entries=150, sequenceid=281, filesize=12.0 K 2024-12-10T14:25:35,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/c0096be60c894b2ea0aedb1ff54f195a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/c0096be60c894b2ea0aedb1ff54f195a 2024-12-10T14:25:35,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/c0096be60c894b2ea0aedb1ff54f195a, entries=150, sequenceid=281, filesize=12.0 K 2024-12-10T14:25:35,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 3cb281b62d072b2e7312c326c99dffff in 933ms, sequenceid=281, compaction requested=true 2024-12-10T14:25:35,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:35,206 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:35,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:25:35,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:35,208 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:35,208 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39589 starting 
at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:35,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:25:35,208 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/A is initiating minor compaction (all files) 2024-12-10T14:25:35,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:35,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:25:35,208 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/A in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:35,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:35,208 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/96b5cd447187452b9ce1127802604f95, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/6d06c88a831a4da485e3e7aacb6a54b4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/fd5513509ad04e3dbbb2158be3b9efec] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=38.7 K 2024-12-10T14:25:35,209 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96b5cd447187452b9ce1127802604f95, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733840732362 2024-12-10T14:25:35,210 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:35,210 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/B is initiating minor compaction (all files) 2024-12-10T14:25:35,210 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/B in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
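The repeated RegionTooBusyException warnings earlier in this run come from HRegion.checkResources rejecting writes while the region's memstore is over its blocking limit (512.0 K here). The HBase client retries such failures internally, but a minimal, purely illustrative sketch of the kind of write loop this test exercises, with an explicit back-off on that exception, could look like the following. The table, row, family, and qualifier names are taken from the log; the retry count and sleep are assumptions, not values used by the test.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      // Retry with a simple linear back-off while the region reports memstore pressure.
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          boolean tooBusy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!tooBusy || attempt >= 5) {
            throw e; // other failure, or gave up after 5 attempts (illustrative limit)
          }
          Thread.sleep(200L * attempt); // give the in-flight flush time to drain the memstore
        }
      }
    }
  }
}
```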
2024-12-10T14:25:35,210 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/ac10ea0378024dfbabdaeb67eae51c99, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/bd87b6023c834aea9e221370411545f9, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/f2bddac2595840ce8d20c5d3be04fe69] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=36.3 K 2024-12-10T14:25:35,210 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d06c88a831a4da485e3e7aacb6a54b4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733840732988 2024-12-10T14:25:35,211 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting ac10ea0378024dfbabdaeb67eae51c99, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733840732362 2024-12-10T14:25:35,211 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting bd87b6023c834aea9e221370411545f9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733840732988 2024-12-10T14:25:35,211 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd5513509ad04e3dbbb2158be3b9efec, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1733840734155 2024-12-10T14:25:35,212 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting f2bddac2595840ce8d20c5d3be04fe69, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1733840734163 2024-12-10T14:25:35,231 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:35,231 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#B#compaction#57 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:35,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-10T14:25:35,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
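The FlushTableProcedure and FlushRegionCallable entries above (pid=20/21) are the server side of the flush the jenkins client requested ("flush TestAcidGuarantees"). A minimal sketch of issuing the same request through the public Admin API is shown below; connection settings are assumed to come from the local hbase-site.xml, and in this 2.7.0-SNAPSHOT build the request is carried out as the FlushTableProcedure seen in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequester {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; the call returns once the
      // operation completes, matching the "Operation: FLUSH ... completed" entry in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```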
2024-12-10T14:25:35,232 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-10T14:25:35,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:35,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:35,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:35,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:35,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:35,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:35,234 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/b142622386af4bd3b5eebfcd91b7d6df is 50, key is test_row_0/B:col10/1733840734274/Put/seqid=0 2024-12-10T14:25:35,238 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#A#compaction#58 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:35,239 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/0905db215254437cae9badc24347f8c5 is 50, key is test_row_0/A:col10/1733840734274/Put/seqid=0 2024-12-10T14:25:35,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/a17bcae373654221b80caff61239aeae is 50, key is test_row_0/A:col10/1733840734277/Put/seqid=0 2024-12-10T14:25:35,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741896_1072 (size=12949) 2024-12-10T14:25:35,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741897_1073 (size=12949) 2024-12-10T14:25:35,282 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/b142622386af4bd3b5eebfcd91b7d6df as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b142622386af4bd3b5eebfcd91b7d6df 2024-12-10T14:25:35,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741898_1074 (size=12301) 2024-12-10T14:25:35,289 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/0905db215254437cae9badc24347f8c5 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/0905db215254437cae9badc24347f8c5 2024-12-10T14:25:35,293 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/B of 3cb281b62d072b2e7312c326c99dffff into b142622386af4bd3b5eebfcd91b7d6df(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
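
A minimal sketch, assuming plain Hadoop FileSystem semantics, of the write-to-.tmp-then-rename pattern behind the "Committing .../.tmp/B/... as .../B/..." lines above. This is not HBase's HRegionFileSystem implementation; the paths and file contents are hypothetical and only illustrate why readers never observe a partially written store file.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only: publish a new store file by writing it under .tmp
// and then renaming it into the store directory, mirroring the "Committing
// ... as ..." log lines above. Not HBase code; paths are made up.
public final class TmpCommitSketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile = new Path("/data/default/TestAcidGuarantees/region/.tmp/B/newfile");
        Path finalFile = new Path("/data/default/TestAcidGuarantees/region/B/newfile");

        // 1. Write the complete file under .tmp so readers never see a partial file.
        try (FSDataOutputStream out = fs.create(tmpFile, /* overwrite = */ true)) {
            out.writeBytes("flushed cells would go here");
        }

        // 2. Move it into the store directory (a rename, which HDFS performs
        //    atomically); only now does the file become visible to scans.
        fs.mkdirs(finalFile.getParent());
        if (!fs.rename(tmpFile, finalFile)) {
            throw new IllegalStateException("rename failed: " + tmpFile + " -> " + finalFile);
        }
    }
}
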
2024-12-10T14:25:35,293 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:35,293 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/B, priority=13, startTime=1733840735207; duration=0sec 2024-12-10T14:25:35,293 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:35,293 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:B 2024-12-10T14:25:35,293 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:35,297 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:35,297 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/C is initiating minor compaction (all files) 2024-12-10T14:25:35,297 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/C in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:35,297 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/5e3207c903e04b49ab003b5d14c99aa8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/f225c747f7f84603a71be7073cdd9d38, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/c0096be60c894b2ea0aedb1ff54f195a] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=36.3 K 2024-12-10T14:25:35,298 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/A of 3cb281b62d072b2e7312c326c99dffff into 0905db215254437cae9badc24347f8c5(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:25:35,298 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:35,298 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/A, priority=13, startTime=1733840735206; duration=0sec 2024-12-10T14:25:35,298 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:35,298 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:A 2024-12-10T14:25:35,299 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e3207c903e04b49ab003b5d14c99aa8, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733840732362 2024-12-10T14:25:35,299 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting f225c747f7f84603a71be7073cdd9d38, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733840732988 2024-12-10T14:25:35,300 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting c0096be60c894b2ea0aedb1ff54f195a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1733840734163 2024-12-10T14:25:35,312 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#C#compaction#60 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:35,313 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/49bb266da6f94d2987e349719779e38a is 50, key is test_row_0/C:col10/1733840734274/Put/seqid=0 2024-12-10T14:25:35,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741899_1075 (size=12949) 2024-12-10T14:25:35,337 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/49bb266da6f94d2987e349719779e38a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/49bb266da6f94d2987e349719779e38a 2024-12-10T14:25:35,346 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/C of 3cb281b62d072b2e7312c326c99dffff into 49bb266da6f94d2987e349719779e38a(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
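
The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" lines above report HBase's store-file selection for the minor compaction. The following is a simplified, self-contained sketch of a ratio-based selection, assuming a toy StoreFile model with hypothetical names and sizes; it illustrates the idea only and is not the actual ExploringCompactionPolicy.

import java.util.ArrayList;
import java.util.List;

// Illustrative sketch only: pick a contiguous run of store files in which no
// single file dwarfs the rest, loosely in the spirit of the "selected 3 files
// ... with 1 in ratio" log lines. NOT the real
// org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy.
public final class RatioSelectionSketch {

    /** A store file is represented here only by its name and size in bytes. */
    record StoreFile(String name, long sizeBytes) {}

    /**
     * Returns the first contiguous window of at least minFiles files in which
     * every file is no larger than ratio * (sum of the other files in the window).
     */
    static List<StoreFile> select(List<StoreFile> files, int minFiles, double ratio) {
        for (int start = 0; start <= files.size() - minFiles; start++) {
            for (int end = files.size(); end - start >= minFiles; end--) {
                List<StoreFile> window = files.subList(start, end);
                if (inRatio(window, ratio)) {
                    return new ArrayList<>(window);
                }
            }
        }
        return List.of(); // nothing eligible for compaction
    }

    private static boolean inRatio(List<StoreFile> window, double ratio) {
        long total = window.stream().mapToLong(StoreFile::sizeBytes).sum();
        // Each file must not be much larger than the rest of the candidate set.
        return window.stream().allMatch(f -> f.sizeBytes() <= ratio * (total - f.sizeBytes()));
    }

    public static void main(String[] args) {
        // Sizes roughly mirroring the 12.4 K / 11.9 K / 12.0 K files in the log.
        List<StoreFile> files = List.of(
                new StoreFile("5e3207c9...", 12_700),
                new StoreFile("f225c747...", 12_200),
                new StoreFile("c0096be6...", 12_300));
        List<StoreFile> picked = select(files, 3, 1.2);
        System.out.println("Selected " + picked.size() + " files for compaction");
    }
}
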
2024-12-10T14:25:35,346 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:35,346 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/C, priority=13, startTime=1733840735208; duration=0sec 2024-12-10T14:25:35,347 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:35,347 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:C 2024-12-10T14:25:35,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T14:25:35,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:35,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:35,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:35,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840795440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:35,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:35,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:35,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840795441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:35,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840795441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:35,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:35,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840795445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:35,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:35,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840795446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:35,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:35,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840795548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:35,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:35,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840795548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:35,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:35,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840795549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:35,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:35,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:35,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840795549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:35,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840795549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:35,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T14:25:35,685 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/a17bcae373654221b80caff61239aeae 2024-12-10T14:25:35,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/392847ade0404aa2968c732e3eaece1f is 50, key is test_row_0/B:col10/1733840734277/Put/seqid=0 2024-12-10T14:25:35,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741900_1076 (size=12301) 2024-12-10T14:25:35,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:35,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840795752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:35,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:35,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840795752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:35,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:35,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840795752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:35,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:35,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840795757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:35,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:35,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840795757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:36,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:36,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840796056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:36,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:36,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840796059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:36,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:36,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840796060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:36,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:36,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840796059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:36,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:36,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840796062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:36,101 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/392847ade0404aa2968c732e3eaece1f 2024-12-10T14:25:36,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/3dff2e8c5e294a319d41aae919ef6241 is 50, key is test_row_0/C:col10/1733840734277/Put/seqid=0 2024-12-10T14:25:36,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741901_1077 (size=12301) 2024-12-10T14:25:36,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T14:25:36,524 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/3dff2e8c5e294a319d41aae919ef6241 2024-12-10T14:25:36,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/a17bcae373654221b80caff61239aeae as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/a17bcae373654221b80caff61239aeae 2024-12-10T14:25:36,556 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/a17bcae373654221b80caff61239aeae, entries=150, sequenceid=290, filesize=12.0 K 2024-12-10T14:25:36,557 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/392847ade0404aa2968c732e3eaece1f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/392847ade0404aa2968c732e3eaece1f 2024-12-10T14:25:36,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:36,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840796562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:36,563 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/392847ade0404aa2968c732e3eaece1f, entries=150, sequenceid=290, filesize=12.0 K 2024-12-10T14:25:36,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/3dff2e8c5e294a319d41aae919ef6241 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/3dff2e8c5e294a319d41aae919ef6241 2024-12-10T14:25:36,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:36,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840796563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:36,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:36,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840796563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:36,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:36,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840796567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:36,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:36,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840796568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:36,574 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/3dff2e8c5e294a319d41aae919ef6241, entries=150, sequenceid=290, filesize=12.0 K 2024-12-10T14:25:36,575 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for 3cb281b62d072b2e7312c326c99dffff in 1343ms, sequenceid=290, compaction requested=false 2024-12-10T14:25:36,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:36,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
2024-12-10T14:25:36,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-10T14:25:36,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-10T14:25:36,580 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-10T14:25:36,580 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5030 sec 2024-12-10T14:25:36,582 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.5090 sec 2024-12-10T14:25:37,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T14:25:37,179 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-10T14:25:37,181 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:25:37,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-10T14:25:37,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T14:25:37,183 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:25:37,184 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:25:37,184 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:25:37,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T14:25:37,336 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:37,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T14:25:37,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
2024-12-10T14:25:37,337 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-10T14:25:37,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:37,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:37,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:37,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:37,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:37,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:37,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/71be7a02b3cd4e589d5b20fd5c971eb7 is 50, key is test_row_0/A:col10/1733840735438/Put/seqid=0 2024-12-10T14:25:37,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741902_1078 (size=12301) 2024-12-10T14:25:37,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T14:25:37,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:37,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:37,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:37,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840797574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:37,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:37,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840797574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:37,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:37,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840797576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:37,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:37,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840797576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:37,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:37,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840797578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:37,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:37,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840797678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:37,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:37,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840797679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:37,751 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/71be7a02b3cd4e589d5b20fd5c971eb7 2024-12-10T14:25:37,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/0a211c24acc54630800e3044990f338e is 50, key is test_row_0/B:col10/1733840735438/Put/seqid=0 2024-12-10T14:25:37,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741903_1079 (size=12301) 2024-12-10T14:25:37,766 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/0a211c24acc54630800e3044990f338e 2024-12-10T14:25:37,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/43c088657aeb4a3dbf9d8a63faad61c7 is 50, key is test_row_0/C:col10/1733840735438/Put/seqid=0 2024-12-10T14:25:37,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741904_1080 (size=12301) 2024-12-10T14:25:37,780 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/43c088657aeb4a3dbf9d8a63faad61c7 2024-12-10T14:25:37,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): 
Checking to see if procedure is done pid=22 2024-12-10T14:25:37,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/71be7a02b3cd4e589d5b20fd5c971eb7 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/71be7a02b3cd4e589d5b20fd5c971eb7 2024-12-10T14:25:37,794 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/71be7a02b3cd4e589d5b20fd5c971eb7, entries=150, sequenceid=322, filesize=12.0 K 2024-12-10T14:25:37,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/0a211c24acc54630800e3044990f338e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0a211c24acc54630800e3044990f338e 2024-12-10T14:25:37,800 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0a211c24acc54630800e3044990f338e, entries=150, sequenceid=322, filesize=12.0 K 2024-12-10T14:25:37,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/43c088657aeb4a3dbf9d8a63faad61c7 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/43c088657aeb4a3dbf9d8a63faad61c7 2024-12-10T14:25:37,808 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/43c088657aeb4a3dbf9d8a63faad61c7, entries=150, sequenceid=322, filesize=12.0 K 2024-12-10T14:25:37,810 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 3cb281b62d072b2e7312c326c99dffff in 473ms, sequenceid=322, compaction requested=true 2024-12-10T14:25:37,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:37,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] 
regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:37,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-10T14:25:37,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-10T14:25:37,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-10T14:25:37,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 628 msec 2024-12-10T14:25:37,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 634 msec 2024-12-10T14:25:37,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:37,887 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T14:25:37,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:37,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:37,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:37,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:37,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:37,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:37,894 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/12f9a9e03e3742b99256664c1a3d729b is 50, key is test_row_0/A:col10/1733840737884/Put/seqid=0 2024-12-10T14:25:37,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741905_1081 (size=14741) 2024-12-10T14:25:37,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:37,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840797952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:37,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:37,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840797953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:38,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:38,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840798054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:38,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:38,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840798059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:38,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:38,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840798257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:38,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:38,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840798263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:38,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T14:25:38,286 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-10T14:25:38,287 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:25:38,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-10T14:25:38,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T14:25:38,290 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:25:38,290 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:25:38,291 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:25:38,327 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/12f9a9e03e3742b99256664c1a3d729b 2024-12-10T14:25:38,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/af7d12f2d56846e380c72488677dff2a is 50, key is test_row_0/B:col10/1733840737884/Put/seqid=0 2024-12-10T14:25:38,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741906_1082 (size=12301) 
2024-12-10T14:25:38,342 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/af7d12f2d56846e380c72488677dff2a 2024-12-10T14:25:38,352 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/5b049c328a614c0aba101ff5c279ffa4 is 50, key is test_row_0/C:col10/1733840737884/Put/seqid=0 2024-12-10T14:25:38,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741907_1083 (size=12301) 2024-12-10T14:25:38,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T14:25:38,444 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:38,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-10T14:25:38,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:38,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:38,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:38,445 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:38,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:38,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:38,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:38,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840798560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:38,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:38,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840798568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:38,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T14:25:38,599 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:38,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-10T14:25:38,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:38,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:38,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:38,600 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
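The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting writes while the region's memstore is over its blocking limit. In a stock configuration that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512.0 K figure here suggests the test runs with a much smaller flush size than the 128 MB default. A minimal sketch of that arithmetic, assuming those two settings are what determine the limit (this is not HBase's own checkResources code, and the in-flight memstore size below is hypothetical):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Stock defaults: 128 MB flush size, 4x block multiplier. The test above
        // evidently overrides the flush size, which is how the limit reads 512.0 K.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        long blockingLimit = flushSize * multiplier;

        long memstoreSize = 600L * 1024; // hypothetical current memstore size for one region
        if (memstoreSize > blockingLimit) {
            System.out.println("Write rejected (RegionTooBusyException), limit=" + blockingLimit);
        } else {
            System.out.println("Write accepted; memstore=" + memstoreSize + ", limit=" + blockingLimit);
        }
    }
}
```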
2024-12-10T14:25:38,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:38,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:38,753 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:38,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-10T14:25:38,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:38,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:38,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:38,754 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:38,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:38,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
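The pid=25 failures in this stretch follow a single pattern: the master dispatches FlushRegionCallable, the region server finds the region "already flushing", the callable throws IOException ("Unable to complete flush"), and the master re-dispatches until the in-progress flush completes. A rough sketch of that dispatch-and-retry shape, with hypothetical names and a simple linear backoff rather than HBase's actual procedure framework:

```java
import java.io.IOException;
import java.util.concurrent.Callable;

public final class RetryUntilNotBusy {
    /** Re-run a task (e.g. a flush-like callable) until it stops throwing IOException.
     *  maxAttempts must be >= 1. */
    static <T> T retry(Callable<T> task, int maxAttempts, long baseBackoffMs) throws Exception {
        IOException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return task.call();                    // succeeds once the prior flush finishes
            } catch (IOException e) {                  // "Unable to complete flush ..."
                last = e;
                Thread.sleep(baseBackoffMs * attempt); // back off before the next dispatch
            }
        }
        throw last;                                    // give up after maxAttempts
    }
}
```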
2024-12-10T14:25:38,758 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/5b049c328a614c0aba101ff5c279ffa4 2024-12-10T14:25:38,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/12f9a9e03e3742b99256664c1a3d729b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/12f9a9e03e3742b99256664c1a3d729b 2024-12-10T14:25:38,771 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/12f9a9e03e3742b99256664c1a3d729b, entries=200, sequenceid=333, filesize=14.4 K 2024-12-10T14:25:38,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/af7d12f2d56846e380c72488677dff2a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/af7d12f2d56846e380c72488677dff2a 2024-12-10T14:25:38,777 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/af7d12f2d56846e380c72488677dff2a, entries=150, sequenceid=333, filesize=12.0 K 2024-12-10T14:25:38,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/5b049c328a614c0aba101ff5c279ffa4 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/5b049c328a614c0aba101ff5c279ffa4 2024-12-10T14:25:38,785 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/5b049c328a614c0aba101ff5c279ffa4, entries=150, sequenceid=333, filesize=12.0 K 2024-12-10T14:25:38,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 3cb281b62d072b2e7312c326c99dffff in 899ms, sequenceid=333, compaction requested=true 2024-12-10T14:25:38,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:38,787 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:25:38,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:A, priority=-2147483648, 
current under compaction store size is 1 2024-12-10T14:25:38,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:38,787 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:25:38,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:25:38,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:38,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:25:38,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:38,789 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52292 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:25:38,789 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/A is initiating minor compaction (all files) 2024-12-10T14:25:38,789 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/A in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
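The "Committing .../.tmp/... as ..." lines a little further up show the shape of a flush commit: each store writes its flushed HFile under the region's .tmp directory and then moves it into the column-family directory with a single rename, so readers never observe a half-written file. A simplified illustration using the Hadoop FileSystem API with paths copied from the log; this is not HRegionFileSystem itself:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitFlushedFileSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Region and file names taken from the log; on a real cluster these sit under hdfs://...
        Path tmp = new Path("/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/12f9a9e03e3742b99256664c1a3d729b");
        Path store = new Path("/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/12f9a9e03e3742b99256664c1a3d729b");
        // The rename is what makes the flushed file visible to readers in one step.
        if (!fs.rename(tmp, store)) {
            throw new java.io.IOException("commit failed: " + tmp + " -> " + store);
        }
    }
}
```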
2024-12-10T14:25:38,789 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/0905db215254437cae9badc24347f8c5, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/a17bcae373654221b80caff61239aeae, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/71be7a02b3cd4e589d5b20fd5c971eb7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/12f9a9e03e3742b99256664c1a3d729b] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=51.1 K 2024-12-10T14:25:38,790 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:25:38,790 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0905db215254437cae9badc24347f8c5, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1733840734163 2024-12-10T14:25:38,790 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/B is initiating minor compaction (all files) 2024-12-10T14:25:38,790 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/B in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
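The "Exploring compaction algorithm has selected 4 files ... with 3 in ratio" lines reflect the selection policy scanning contiguous windows of store files and keeping only windows in which every file is no larger than the combined size of the other files times the compaction ratio. A hedged sketch of just that size test (the real ExploringCompactionPolicy also applies min/max file counts, off-peak ratios, and picks the cheapest qualifying window):

```java
import java.util.List;

public final class RatioCheckSketch {
    /** True if every file is no larger than ratio * (sum of the other files). */
    static boolean inRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the four A-store files selected above (~51.1 K total).
        List<Long> candidate = List.of(12902L, 12288L, 12288L, 14746L);
        System.out.println(inRatio(candidate, 1.2)); // 1.2 is the commonly used default ratio
    }
}
```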
2024-12-10T14:25:38,791 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b142622386af4bd3b5eebfcd91b7d6df, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/392847ade0404aa2968c732e3eaece1f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0a211c24acc54630800e3044990f338e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/af7d12f2d56846e380c72488677dff2a] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=48.7 K 2024-12-10T14:25:38,791 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting a17bcae373654221b80caff61239aeae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733840734277 2024-12-10T14:25:38,791 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting b142622386af4bd3b5eebfcd91b7d6df, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1733840734163 2024-12-10T14:25:38,791 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71be7a02b3cd4e589d5b20fd5c971eb7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1733840735438 2024-12-10T14:25:38,792 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12f9a9e03e3742b99256664c1a3d729b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733840737571 2024-12-10T14:25:38,792 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 392847ade0404aa2968c732e3eaece1f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733840734277 2024-12-10T14:25:38,792 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a211c24acc54630800e3044990f338e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1733840735438 2024-12-10T14:25:38,793 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting af7d12f2d56846e380c72488677dff2a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733840737571 2024-12-10T14:25:38,807 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#A#compaction#70 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:38,807 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#B#compaction#69 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:38,808 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/2e6e6fa389d741e59e011cb78dbb2d1b is 50, key is test_row_0/A:col10/1733840737884/Put/seqid=0 2024-12-10T14:25:38,808 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/e20f84e12cad4388843c5d35f332ea8f is 50, key is test_row_0/B:col10/1733840737884/Put/seqid=0 2024-12-10T14:25:38,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741908_1084 (size=13085) 2024-12-10T14:25:38,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741909_1085 (size=13085) 2024-12-10T14:25:38,841 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/2e6e6fa389d741e59e011cb78dbb2d1b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/2e6e6fa389d741e59e011cb78dbb2d1b 2024-12-10T14:25:38,841 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/e20f84e12cad4388843c5d35f332ea8f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/e20f84e12cad4388843c5d35f332ea8f 2024-12-10T14:25:38,849 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/B of 3cb281b62d072b2e7312c326c99dffff into e20f84e12cad4388843c5d35f332ea8f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:25:38,849 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/A of 3cb281b62d072b2e7312c326c99dffff into 2e6e6fa389d741e59e011cb78dbb2d1b(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
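The PressureAwareThroughputController lines report how fast each compaction wrote and whether it had to sleep to stay under the aggregate limit (50.00 MB/second here; nothing slept because these compactions are tiny). A simplified stand-in for that idea, assuming a plain bytes-per-second budget rather than HBase's pressure-adjusted one:

```java
public final class SimpleThroughputThrottle {
    private final double bytesPerSecond;
    private final long startNanos = System.nanoTime();
    private long bytesWritten = 0;

    public SimpleThroughputThrottle(double bytesPerSecond) {
        this.bytesPerSecond = bytesPerSecond;
    }

    /** Call after writing a chunk; sleeps if we are ahead of the allowed rate. */
    public void control(long bytesJustWritten) throws InterruptedException {
        bytesWritten += bytesJustWritten;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double allowed = elapsedSec * bytesPerSecond;
        if (bytesWritten > allowed) {
            long sleepMs = (long) (((bytesWritten - allowed) / bytesPerSecond) * 1000);
            Thread.sleep(sleepMs); // corresponds to the "slept N time(s)" counter in the log
        }
    }
}
```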
2024-12-10T14:25:38,849 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:38,849 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:38,850 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/A, priority=12, startTime=1733840738787; duration=0sec 2024-12-10T14:25:38,850 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/B, priority=12, startTime=1733840738787; duration=0sec 2024-12-10T14:25:38,850 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:38,850 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:A 2024-12-10T14:25:38,850 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:38,850 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:B 2024-12-10T14:25:38,850 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:25:38,851 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:25:38,852 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/C is initiating minor compaction (all files) 2024-12-10T14:25:38,852 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/C in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
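The pid=24/pid=25 procedures being polled throughout this section ("Checking to see if procedure is done pid=24") are the table flush the test requested, executed as a FlushTableProcedure with a FlushRegionProcedure subprocedure for the region. From the client side such a flush is typically triggered through the Admin API; a minimal sketch, leaving connection details to the default configuration:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Requests a flush of every region of the table; on the master this turns
            // into flush procedures much like pid=24/pid=25 in the log above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```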
2024-12-10T14:25:38,852 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/49bb266da6f94d2987e349719779e38a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/3dff2e8c5e294a319d41aae919ef6241, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/43c088657aeb4a3dbf9d8a63faad61c7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/5b049c328a614c0aba101ff5c279ffa4] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=48.7 K 2024-12-10T14:25:38,852 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49bb266da6f94d2987e349719779e38a, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1733840734163 2024-12-10T14:25:38,853 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3dff2e8c5e294a319d41aae919ef6241, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733840734277 2024-12-10T14:25:38,853 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43c088657aeb4a3dbf9d8a63faad61c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1733840735438 2024-12-10T14:25:38,854 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b049c328a614c0aba101ff5c279ffa4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733840737571 2024-12-10T14:25:38,866 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#C#compaction#71 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:38,867 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/5d6a2825558549a197cf9badac03a139 is 50, key is test_row_0/C:col10/1733840737884/Put/seqid=0 2024-12-10T14:25:38,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741910_1086 (size=13085) 2024-12-10T14:25:38,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T14:25:38,906 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:38,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-10T14:25:38,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:38,908 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T14:25:38,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:38,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:38,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:38,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:38,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:38,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:38,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/5e720df185764364b5e8fd4dc7a8410c is 50, key is test_row_0/A:col10/1733840737949/Put/seqid=0 2024-12-10T14:25:38,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741911_1087 (size=12301) 
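The flushed cells named in the "Len of the biggest cell" lines have keys of the form test_row_0/A:col10/&lt;timestamp&gt;/Put, i.e. row test_row_0, family A, qualifier col10. A client write of that shape uses the standard HBase client API; the value below is a placeholder for illustration:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // Client retries may eventually surface RegionTooBusyException if the
            // region stays over its memstore limit, as seen in the warnings above.
            table.put(put);
        }
    }
}
```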
2024-12-10T14:25:38,941 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/5e720df185764364b5e8fd4dc7a8410c 2024-12-10T14:25:38,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/d82e08461d5b48edaac427bfb0c406cb is 50, key is test_row_0/B:col10/1733840737949/Put/seqid=0 2024-12-10T14:25:38,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741912_1088 (size=12301) 2024-12-10T14:25:38,966 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/d82e08461d5b48edaac427bfb0c406cb 2024-12-10T14:25:38,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/1008ddc06a5f4a189d78867697a3cfc0 is 50, key is test_row_0/C:col10/1733840737949/Put/seqid=0 2024-12-10T14:25:38,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741913_1089 (size=12301) 2024-12-10T14:25:39,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:39,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840799076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840799077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840799179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840799180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,280 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/5d6a2825558549a197cf9badac03a139 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/5d6a2825558549a197cf9badac03a139 2024-12-10T14:25:39,287 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/C of 3cb281b62d072b2e7312c326c99dffff into 5d6a2825558549a197cf9badac03a139(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:25:39,287 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:39,287 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/C, priority=12, startTime=1733840738788; duration=0sec 2024-12-10T14:25:39,287 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:39,287 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:C 2024-12-10T14:25:39,383 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/1008ddc06a5f4a189d78867697a3cfc0 2024-12-10T14:25:39,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840799384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840799384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/5e720df185764364b5e8fd4dc7a8410c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/5e720df185764364b5e8fd4dc7a8410c 2024-12-10T14:25:39,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T14:25:39,396 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/5e720df185764364b5e8fd4dc7a8410c, entries=150, sequenceid=361, filesize=12.0 K 2024-12-10T14:25:39,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/d82e08461d5b48edaac427bfb0c406cb as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/d82e08461d5b48edaac427bfb0c406cb 2024-12-10T14:25:39,406 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/d82e08461d5b48edaac427bfb0c406cb, entries=150, sequenceid=361, filesize=12.0 K 2024-12-10T14:25:39,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/1008ddc06a5f4a189d78867697a3cfc0 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/1008ddc06a5f4a189d78867697a3cfc0 2024-12-10T14:25:39,413 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/1008ddc06a5f4a189d78867697a3cfc0, entries=150, sequenceid=361, filesize=12.0 K 2024-12-10T14:25:39,414 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 3cb281b62d072b2e7312c326c99dffff in 507ms, sequenceid=361, compaction requested=false 2024-12-10T14:25:39,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:39,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:39,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-10T14:25:39,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-10T14:25:39,417 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-10T14:25:39,417 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1240 sec 2024-12-10T14:25:39,419 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.1310 sec 2024-12-10T14:25:39,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:39,583 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T14:25:39,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:39,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:39,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:39,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:39,584 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:39,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:39,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/69d8f0834b8247318b529850c524cc6e is 50, key is test_row_0/A:col10/1733840739581/Put/seqid=0 2024-12-10T14:25:39,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741914_1090 (size=17181) 2024-12-10T14:25:39,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840799610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840799613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840799613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840799686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840799692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840799715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840799718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,719 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840799718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840799918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:39,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840799921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840799921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:39,997 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/69d8f0834b8247318b529850c524cc6e 2024-12-10T14:25:40,007 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/67f16c6230764b67b5fae9fca9acc6e1 is 50, key is test_row_0/B:col10/1733840739581/Put/seqid=0 2024-12-10T14:25:40,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741915_1091 (size=12301) 2024-12-10T14:25:40,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:40,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840800190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:40,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:40,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840800195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:40,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:40,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840800222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:40,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:40,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840800224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:40,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:40,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840800226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:40,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T14:25:40,394 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-10T14:25:40,396 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:25:40,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-10T14:25:40,398 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:25:40,398 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:25:40,399 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:25:40,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-10T14:25:40,413 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=374 
(bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/67f16c6230764b67b5fae9fca9acc6e1 2024-12-10T14:25:40,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/1a5524d8129b4673a217d15d9da88b90 is 50, key is test_row_0/C:col10/1733840739581/Put/seqid=0 2024-12-10T14:25:40,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741916_1092 (size=12301) 2024-12-10T14:25:40,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-10T14:25:40,550 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:40,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-10T14:25:40,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:40,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:40,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:40,551 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
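
The RegionTooBusyException entries above are thrown by HRegion.checkResources() when the region's memstore exceeds its blocking limit (here 512.0 K) while flushes are still in progress; that limit corresponds to hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, presumably configured very small for this test so that blocking is exercised quickly. Below is a minimal client-side sketch of a writer that backs off on this exception. The table, row, and column names mirror the rows visible in the log, but the retry count and backoff are illustrative assumptions, and a real client may surface the exception wrapped in its own retry machinery rather than directly.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingWriter {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                // Back off and retry while the region rejects writes because its
                // memstore is over the blocking limit (RegionTooBusyException).
                for (int attempt = 0; attempt < 5; attempt++) {   // retry count: illustrative assumption
                    try {
                        table.put(put);
                        break;
                    } catch (RegionTooBusyException busy) {
                        Thread.sleep(100L * (attempt + 1));       // linear backoff: illustrative assumption
                    }
                }
            }
        }
    }
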
2024-12-10T14:25:40,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:40,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:40,703 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:40,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-10T14:25:40,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:40,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:40,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:40,704 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:40,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:40,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-10T14:25:40,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:40,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840800726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:40,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:40,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:40,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840800730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:40,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840800730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:40,827 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/1a5524d8129b4673a217d15d9da88b90 2024-12-10T14:25:40,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/69d8f0834b8247318b529850c524cc6e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/69d8f0834b8247318b529850c524cc6e 2024-12-10T14:25:40,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/69d8f0834b8247318b529850c524cc6e, entries=250, sequenceid=374, filesize=16.8 K 2024-12-10T14:25:40,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/67f16c6230764b67b5fae9fca9acc6e1 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/67f16c6230764b67b5fae9fca9acc6e1 2024-12-10T14:25:40,848 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/67f16c6230764b67b5fae9fca9acc6e1, entries=150, sequenceid=374, filesize=12.0 K 2024-12-10T14:25:40,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/1a5524d8129b4673a217d15d9da88b90 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/1a5524d8129b4673a217d15d9da88b90 2024-12-10T14:25:40,855 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/1a5524d8129b4673a217d15d9da88b90, entries=150, sequenceid=374, filesize=12.0 K 2024-12-10T14:25:40,856 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 3cb281b62d072b2e7312c326c99dffff in 1273ms, sequenceid=374, compaction requested=true 2024-12-10T14:25:40,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:40,856 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:40,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:25:40,856 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:40,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:40,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:25:40,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:40,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:25:40,857 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:40,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:40,857 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-10T14:25:40,857 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:40,857 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T14:25:40,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:40,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:40,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:40,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:40,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:40,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:40,858 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42567 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:40,858 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/A is initiating minor compaction (all files) 2024-12-10T14:25:40,858 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/A in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
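Note on the repeated RegionTooBusyException entries above: "Over memstore limit=512.0 K" is raised by HRegion.checkResources when a put arrives while the region's memstore is still draining, and the stock HBase client normally retries such failures internally. Purely as an illustration, the sketch below retries an explicit put with backoff. The table name TestAcidGuarantees, column family A and qualifier col10 are taken from this log; the row value, retry budget and backoff numbers are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // Family "A" and qualifier "col10" appear in the log; the value is made up.
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;          // assumed starting backoff
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);        // same server-side path as HRegion.put in the traces above
                    break;
                } catch (RegionTooBusyException e) {
                    // The region is blocking writes until its memstore drops below the limit.
                    if (attempt >= 5) {    // assumed retry budget
                        throw e;
                    }
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;        // simple exponential backoff
                }
            }
        }
    }
}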
2024-12-10T14:25:40,858 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:40,858 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/2e6e6fa389d741e59e011cb78dbb2d1b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/5e720df185764364b5e8fd4dc7a8410c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/69d8f0834b8247318b529850c524cc6e] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=41.6 K 2024-12-10T14:25:40,858 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/B is initiating minor compaction (all files) 2024-12-10T14:25:40,858 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/B in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:40,859 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/e20f84e12cad4388843c5d35f332ea8f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/d82e08461d5b48edaac427bfb0c406cb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/67f16c6230764b67b5fae9fca9acc6e1] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=36.8 K 2024-12-10T14:25:40,860 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting e20f84e12cad4388843c5d35f332ea8f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733840737571 2024-12-10T14:25:40,860 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e6e6fa389d741e59e011cb78dbb2d1b, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733840737571 2024-12-10T14:25:40,860 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting d82e08461d5b48edaac427bfb0c406cb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1733840737945 2024-12-10T14:25:40,860 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e720df185764364b5e8fd4dc7a8410c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1733840737945 2024-12-10T14:25:40,860 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 67f16c6230764b67b5fae9fca9acc6e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1733840739076 2024-12-10T14:25:40,860 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69d8f0834b8247318b529850c524cc6e, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1733840739074 2024-12-10T14:25:40,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/48889bff4ec34a658cbaa3dbbb69647e is 50, key is test_row_0/A:col10/1733840739611/Put/seqid=0 2024-12-10T14:25:40,874 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#B#compaction#79 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:40,875 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/91e6db4c26314a7bbeee533fec5e82ce is 50, key is test_row_0/B:col10/1733840739581/Put/seqid=0 2024-12-10T14:25:40,875 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#A#compaction#80 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:40,876 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/6a10b03cb35849b6b32bc9f7434dd515 is 50, key is test_row_0/A:col10/1733840739581/Put/seqid=0 2024-12-10T14:25:40,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741917_1093 (size=12301) 2024-12-10T14:25:40,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741918_1094 (size=13187) 2024-12-10T14:25:40,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741919_1095 (size=13187) 2024-12-10T14:25:41,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-10T14:25:41,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:41,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:41,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:41,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840801210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:41,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:41,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840801213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:41,288 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/48889bff4ec34a658cbaa3dbbb69647e 2024-12-10T14:25:41,295 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/91e6db4c26314a7bbeee533fec5e82ce as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/91e6db4c26314a7bbeee533fec5e82ce 2024-12-10T14:25:41,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/a084b50270dd43478507c3293caeaa8f is 50, key is test_row_0/B:col10/1733840739611/Put/seqid=0 2024-12-10T14:25:41,302 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/6a10b03cb35849b6b32bc9f7434dd515 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/6a10b03cb35849b6b32bc9f7434dd515 2024-12-10T14:25:41,305 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/B of 3cb281b62d072b2e7312c326c99dffff into 91e6db4c26314a7bbeee533fec5e82ce(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
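The ExploringCompactionPolicy(116) lines above report choosing 3 store files after considering 1 permutation "with 1 in ratio". The following is a deliberately simplified, self-contained sketch of that kind of ratio test (a file stays eligible only if it is not much larger than the other candidates combined); it is not the actual HBase policy code, and the 1.2 ratio and byte sizes are assumptions loosely matching the 12.8 K / 12.0 K / 16.8 K files selected for store A.

import java.util.ArrayList;
import java.util.List;

// Simplified illustration of a ratio-based minor-compaction check, in the spirit of the
// "in ratio" messages above. This is NOT the HBase ExploringCompactionPolicy source.
public class RatioSelectionSketch {

    /** Returns the candidate file sizes (bytes) that pass the ratio test. */
    static List<Long> selectForCompaction(List<Long> fileSizes, double ratio) {
        long total = 0;
        for (long size : fileSizes) {
            total += size;
        }
        List<Long> selected = new ArrayList<>();
        for (long size : fileSizes) {
            long others = total - size;
            // A file is "in ratio" if it is not much larger than everything else combined.
            if (size <= others * ratio) {
                selected.add(size);
            }
        }
        return selected;
    }

    public static void main(String[] args) {
        // Sizes roughly matching the A-store selection in the log: 12.8 K + 12.0 K + 16.8 K.
        List<Long> sizes = List.of(13_107L, 12_288L, 17_203L);
        System.out.println(selectForCompaction(sizes, 1.2)); // all three pass, as in the log
    }
}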
2024-12-10T14:25:41,305 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:41,305 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/B, priority=13, startTime=1733840740856; duration=0sec 2024-12-10T14:25:41,305 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:41,305 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:B 2024-12-10T14:25:41,305 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:41,307 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:41,308 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/C is initiating minor compaction (all files) 2024-12-10T14:25:41,308 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/C in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:41,308 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/5d6a2825558549a197cf9badac03a139, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/1008ddc06a5f4a189d78867697a3cfc0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/1a5524d8129b4673a217d15d9da88b90] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=36.8 K 2024-12-10T14:25:41,308 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d6a2825558549a197cf9badac03a139, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733840737571 2024-12-10T14:25:41,309 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 1008ddc06a5f4a189d78867697a3cfc0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=361, earliestPutTs=1733840737945 2024-12-10T14:25:41,310 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a5524d8129b4673a217d15d9da88b90, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1733840739076 2024-12-10T14:25:41,311 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in 3cb281b62d072b2e7312c326c99dffff/A of 3cb281b62d072b2e7312c326c99dffff into 6a10b03cb35849b6b32bc9f7434dd515(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:25:41,311 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:41,311 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/A, priority=13, startTime=1733840740856; duration=0sec 2024-12-10T14:25:41,311 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:41,311 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:A 2024-12-10T14:25:41,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:41,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840801315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:41,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:41,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840801316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:41,322 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#C#compaction#82 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:41,341 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/a7c0e22acdb443cab9639e9be710e510 is 50, key is test_row_0/C:col10/1733840739581/Put/seqid=0 2024-12-10T14:25:41,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741920_1096 (size=12301) 2024-12-10T14:25:41,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741921_1097 (size=13187) 2024-12-10T14:25:41,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-10T14:25:41,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:41,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840801519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:41,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:41,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840801520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:41,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:41,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840801734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:41,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:41,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840801737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:41,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:41,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840801739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:41,743 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/a084b50270dd43478507c3293caeaa8f 2024-12-10T14:25:41,761 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/a7c0e22acdb443cab9639e9be710e510 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/a7c0e22acdb443cab9639e9be710e510 2024-12-10T14:25:41,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/b1d46f1152d3426dacd95769d0fafea0 is 50, key is test_row_0/C:col10/1733840739611/Put/seqid=0 2024-12-10T14:25:41,768 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/C of 3cb281b62d072b2e7312c326c99dffff into a7c0e22acdb443cab9639e9be710e510(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
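The "Over memstore limit=512.0 K" rejections indicate the region has hit its blocking memstore size, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The snippet below only illustrates how such a limit could be expressed in configuration; the 128 KB flush size and 4x multiplier are assumed values chosen so the product matches the 512 K reported here, not values read from this test's actual setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Hypothetical values: a 128 KB flush size with the 4x block multiplier gives the
        // 512 KB blocking limit reported in the RegionTooBusyException messages above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", -1);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", -1);
        // Writes to a region are blocked once its memstore exceeds flushSize * multiplier.
        System.out.println("blocking limit = " + (flushSize * multiplier) + " bytes");
    }
}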
2024-12-10T14:25:41,768 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:41,768 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/C, priority=13, startTime=1733840740857; duration=0sec 2024-12-10T14:25:41,768 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:41,768 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:C 2024-12-10T14:25:41,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741922_1098 (size=12301) 2024-12-10T14:25:41,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:41,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840801822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:41,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:41,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840801823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:42,182 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/b1d46f1152d3426dacd95769d0fafea0 2024-12-10T14:25:42,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/48889bff4ec34a658cbaa3dbbb69647e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/48889bff4ec34a658cbaa3dbbb69647e 2024-12-10T14:25:42,195 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/48889bff4ec34a658cbaa3dbbb69647e, entries=150, sequenceid=398, filesize=12.0 K 2024-12-10T14:25:42,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/a084b50270dd43478507c3293caeaa8f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/a084b50270dd43478507c3293caeaa8f 2024-12-10T14:25:42,201 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/a084b50270dd43478507c3293caeaa8f, entries=150, sequenceid=398, filesize=12.0 K 2024-12-10T14:25:42,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/b1d46f1152d3426dacd95769d0fafea0 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/b1d46f1152d3426dacd95769d0fafea0 2024-12-10T14:25:42,207 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/b1d46f1152d3426dacd95769d0fafea0, entries=150, sequenceid=398, filesize=12.0 K 2024-12-10T14:25:42,208 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 3cb281b62d072b2e7312c326c99dffff in 1351ms, sequenceid=398, compaction requested=false 2024-12-10T14:25:42,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:42,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
2024-12-10T14:25:42,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-10T14:25:42,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-10T14:25:42,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-10T14:25:42,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8110 sec 2024-12-10T14:25:42,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.8180 sec 2024-12-10T14:25:42,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:42,327 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T14:25:42,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:42,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:42,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:42,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:42,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:42,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:42,335 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/45797bd1d0284db5b733a22cbc2122b3 is 50, key is test_row_0/A:col10/1733840741208/Put/seqid=0 2024-12-10T14:25:42,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741923_1099 (size=14741) 2024-12-10T14:25:42,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:42,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840802361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:42,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:42,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840802362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:42,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:42,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840802465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:42,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:42,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840802465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:42,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-10T14:25:42,508 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-10T14:25:42,510 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:25:42,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-12-10T14:25:42,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-10T14:25:42,512 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:25:42,512 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:25:42,512 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:25:42,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-10T14:25:42,664 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:42,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-10T14:25:42,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
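Around this point the client (Client=jenkins) asks the master to flush TestAcidGuarantees, which runs as FlushTableProcedure pid=28 with a per-region FlushRegionProcedure subprocedure. A minimal sketch of issuing that kind of flush through the public Admin API is shown below; the connection setup is assumed, and the call waits on the procedure much like the HBaseAdmin$TableFuture line for procId 26 above.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Triggers a master-side flush procedure like the pid=26/pid=28 ones in this log
            // and returns once the flush operation has completed.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}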
2024-12-10T14:25:42,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:42,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:42,665 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:42,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:42,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:42,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:42,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840802669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:42,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:42,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840802669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:42,741 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=415 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/45797bd1d0284db5b733a22cbc2122b3 2024-12-10T14:25:42,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/c2f8b56f978f44598bf2da387db982e8 is 50, key is test_row_0/B:col10/1733840741208/Put/seqid=0 2024-12-10T14:25:42,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741924_1100 (size=12301) 2024-12-10T14:25:42,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-10T14:25:42,817 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:42,818 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-10T14:25:42,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:42,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:42,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
2024-12-10T14:25:42,818 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:42,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:42,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:42,970 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:42,971 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-10T14:25:42,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:42,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:42,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:42,971 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:42,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:42,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:42,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:42,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840802974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:42,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:42,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840802975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:43,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-10T14:25:43,124 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:43,125 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-10T14:25:43,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:43,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:43,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:43,125 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:43,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:43,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:43,158 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=415 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/c2f8b56f978f44598bf2da387db982e8 2024-12-10T14:25:43,168 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/693fe590253e4e339227c050d76301b4 is 50, key is test_row_0/C:col10/1733840741208/Put/seqid=0 2024-12-10T14:25:43,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741925_1101 (size=12301) 2024-12-10T14:25:43,278 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:43,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-10T14:25:43,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:43,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:43,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:43,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:43,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:43,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:43,431 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:43,431 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-10T14:25:43,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:43,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:43,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:43,432 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:43,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:43,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:43,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:43,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840803478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:43,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:43,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840803479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:43,573 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=415 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/693fe590253e4e339227c050d76301b4 2024-12-10T14:25:43,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/45797bd1d0284db5b733a22cbc2122b3 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/45797bd1d0284db5b733a22cbc2122b3 2024-12-10T14:25:43,583 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/45797bd1d0284db5b733a22cbc2122b3, entries=200, sequenceid=415, filesize=14.4 K 2024-12-10T14:25:43,584 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:43,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-10T14:25:43,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:43,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:43,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
2024-12-10T14:25:43,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:43,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:43,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/c2f8b56f978f44598bf2da387db982e8 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/c2f8b56f978f44598bf2da387db982e8 2024-12-10T14:25:43,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:43,590 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/c2f8b56f978f44598bf2da387db982e8, entries=150, sequenceid=415, filesize=12.0 K 2024-12-10T14:25:43,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/693fe590253e4e339227c050d76301b4 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/693fe590253e4e339227c050d76301b4 2024-12-10T14:25:43,597 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/693fe590253e4e339227c050d76301b4, entries=150, sequenceid=415, filesize=12.0 K 2024-12-10T14:25:43,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 3cb281b62d072b2e7312c326c99dffff in 1271ms, sequenceid=415, compaction requested=true 2024-12-10T14:25:43,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:43,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:25:43,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:43,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:25:43,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:43,598 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:43,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:25:43,598 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:43,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:43,599 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:43,599 DEBUG 
[RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40229 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:43,599 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/A is initiating minor compaction (all files) 2024-12-10T14:25:43,599 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/B is initiating minor compaction (all files) 2024-12-10T14:25:43,599 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/A in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:43,599 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/B in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:43,599 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/6a10b03cb35849b6b32bc9f7434dd515, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/48889bff4ec34a658cbaa3dbbb69647e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/45797bd1d0284db5b733a22cbc2122b3] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=39.3 K 2024-12-10T14:25:43,599 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/91e6db4c26314a7bbeee533fec5e82ce, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/a084b50270dd43478507c3293caeaa8f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/c2f8b56f978f44598bf2da387db982e8] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=36.9 K 2024-12-10T14:25:43,600 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a10b03cb35849b6b32bc9f7434dd515, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1733840739076 2024-12-10T14:25:43,600 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 91e6db4c26314a7bbeee533fec5e82ce, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1733840739076 2024-12-10T14:25:43,600 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48889bff4ec34a658cbaa3dbbb69647e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, 
seqNum=398, earliestPutTs=1733840739606 2024-12-10T14:25:43,600 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting a084b50270dd43478507c3293caeaa8f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733840739606 2024-12-10T14:25:43,601 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting c2f8b56f978f44598bf2da387db982e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1733840741206 2024-12-10T14:25:43,601 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45797bd1d0284db5b733a22cbc2122b3, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1733840741206 2024-12-10T14:25:43,609 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#B#compaction#87 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:43,610 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/95388d76d81a4d0c80cd3a5c34ed3084 is 50, key is test_row_0/B:col10/1733840741208/Put/seqid=0 2024-12-10T14:25:43,612 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#A#compaction#88 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:43,613 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/c07c1afb559f4be79211c8fe07f0f1fb is 50, key is test_row_0/A:col10/1733840741208/Put/seqid=0 2024-12-10T14:25:43,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-10T14:25:43,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741926_1102 (size=13289) 2024-12-10T14:25:43,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741927_1103 (size=13289) 2024-12-10T14:25:43,628 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/95388d76d81a4d0c80cd3a5c34ed3084 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/95388d76d81a4d0c80cd3a5c34ed3084 2024-12-10T14:25:43,632 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/c07c1afb559f4be79211c8fe07f0f1fb as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/c07c1afb559f4be79211c8fe07f0f1fb 2024-12-10T14:25:43,636 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/B of 3cb281b62d072b2e7312c326c99dffff into 95388d76d81a4d0c80cd3a5c34ed3084(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
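The compaction entries above are system-requested minor compactions, queued by MemStoreFlusher and selected by ExploringCompactionPolicy, which rewrite three store files per column family into a single ~13.0 K file under the 50 MB/s throughput limit. Compactions can also be requested explicitly through the Admin API. A minimal sketch under the same assumptions as above (configuration from hbase-site.xml); the polling loop and one-second interval are illustrative, not taken from this log:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactTableExample {
    public static void main(String[] args) throws IOException, InterruptedException {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Queue a (minor) compaction for every store of every region of the table;
            // admin.majorCompact(table) would rewrite all store files instead.
            admin.compact(table);

            // Compactions run asynchronously on the region servers, so poll the state.
            // (A freshly queued compaction may not be visible immediately; this simple
            // poll is only an illustration.)
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(1000L);
            }
        }
    }
}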
2024-12-10T14:25:43,637 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:43,637 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/B, priority=13, startTime=1733840743598; duration=0sec 2024-12-10T14:25:43,637 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:43,637 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:B 2024-12-10T14:25:43,637 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:43,639 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:43,639 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/C is initiating minor compaction (all files) 2024-12-10T14:25:43,639 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/C in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:43,640 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/a7c0e22acdb443cab9639e9be710e510, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/b1d46f1152d3426dacd95769d0fafea0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/693fe590253e4e339227c050d76301b4] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=36.9 K 2024-12-10T14:25:43,640 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/A of 3cb281b62d072b2e7312c326c99dffff into c07c1afb559f4be79211c8fe07f0f1fb(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:25:43,640 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting a7c0e22acdb443cab9639e9be710e510, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1733840739076 2024-12-10T14:25:43,640 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:43,640 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/A, priority=13, startTime=1733840743598; duration=0sec 2024-12-10T14:25:43,640 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:43,640 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:A 2024-12-10T14:25:43,641 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting b1d46f1152d3426dacd95769d0fafea0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733840739606 2024-12-10T14:25:43,641 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 693fe590253e4e339227c050d76301b4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1733840741206 2024-12-10T14:25:43,651 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#C#compaction#89 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:43,652 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/6834a715e481499f93f3795cedb2d804 is 50, key is test_row_0/C:col10/1733840741208/Put/seqid=0 2024-12-10T14:25:43,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741928_1104 (size=13289) 2024-12-10T14:25:43,737 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:43,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-10T14:25:43,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
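The entries that follow show client Mutate calls being rejected with RegionTooBusyException because the region's memstore has reached its blocking limit (512.0 K in this test; the blocking limit is normally hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier). The standard HBase client retries such failures internally, with backoff, before surfacing an error to the caller. A minimal write-path sketch under that assumption; the retry settings are illustrative, while the row key, family, and qualifier mirror the test_row_0/A:col10 cells seen in this log:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ThrottledPutExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative retry settings: the client retries RegionTooBusyException internally,
        // pausing between attempts, before giving up with a retries-exhausted error.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100L);                 // base pause between retries, ms
        conf.setLong("hbase.client.operation.timeout", 60_000L);  // overall cap per operation, ms

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // If the region is blocked on its memstore limit, this call is retried under the
            // settings above; only once they are exhausted does the exception reach the caller.
            table.put(put);
        }
    }
}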
2024-12-10T14:25:43,738 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T14:25:43,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:43,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:43,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:43,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:43,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:43,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:43,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:43,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. as already flushing 2024-12-10T14:25:43,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/5acc92cd83cb41dbb150477a06cdba20 is 50, key is test_row_0/A:col10/1733840742348/Put/seqid=0 2024-12-10T14:25:43,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:43,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840803755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:43,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:43,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840803757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:43,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:43,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840803758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:43,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741929_1105 (size=12301) 2024-12-10T14:25:43,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:43,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840803859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:43,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:43,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840803862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:43,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:43,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840803862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:44,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:44,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840804063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:44,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:44,066 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/6834a715e481499f93f3795cedb2d804 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/6834a715e481499f93f3795cedb2d804 2024-12-10T14:25:44,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840804064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:44,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:44,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840804065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:44,072 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/C of 3cb281b62d072b2e7312c326c99dffff into 6834a715e481499f93f3795cedb2d804(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:25:44,072 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:44,072 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/C, priority=13, startTime=1733840743598; duration=0sec 2024-12-10T14:25:44,072 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:44,072 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:C 2024-12-10T14:25:44,167 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/5acc92cd83cb41dbb150477a06cdba20 2024-12-10T14:25:44,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/9933e884dc6e4b5297d5afcefee3a601 is 50, key is test_row_0/B:col10/1733840742348/Put/seqid=0 2024-12-10T14:25:44,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741930_1106 (size=12301) 2024-12-10T14:25:44,190 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=439 (bloomFilter=true), 
to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/9933e884dc6e4b5297d5afcefee3a601 2024-12-10T14:25:44,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/d345eea15d814998ad79f83a951bf5ea is 50, key is test_row_0/C:col10/1733840742348/Put/seqid=0 2024-12-10T14:25:44,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741931_1107 (size=12301) 2024-12-10T14:25:44,207 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/d345eea15d814998ad79f83a951bf5ea 2024-12-10T14:25:44,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/5acc92cd83cb41dbb150477a06cdba20 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/5acc92cd83cb41dbb150477a06cdba20 2024-12-10T14:25:44,219 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/5acc92cd83cb41dbb150477a06cdba20, entries=150, sequenceid=439, filesize=12.0 K 2024-12-10T14:25:44,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/9933e884dc6e4b5297d5afcefee3a601 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/9933e884dc6e4b5297d5afcefee3a601 2024-12-10T14:25:44,225 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/9933e884dc6e4b5297d5afcefee3a601, entries=150, sequenceid=439, filesize=12.0 K 2024-12-10T14:25:44,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/d345eea15d814998ad79f83a951bf5ea as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/d345eea15d814998ad79f83a951bf5ea 2024-12-10T14:25:44,232 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/d345eea15d814998ad79f83a951bf5ea, entries=150, sequenceid=439, filesize=12.0 K 2024-12-10T14:25:44,234 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 3cb281b62d072b2e7312c326c99dffff in 495ms, sequenceid=439, compaction requested=false 2024-12-10T14:25:44,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:44,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:44,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-10T14:25:44,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-12-10T14:25:44,236 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-10T14:25:44,237 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7230 sec 2024-12-10T14:25:44,238 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.7270 sec 2024-12-10T14:25:44,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:44,368 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-10T14:25:44,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:44,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:44,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:44,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:44,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:44,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:44,389 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:44,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840804386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:44,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/bbaa34821b70496f93800f82998cc4b2 is 50, key is test_row_0/A:col10/1733840744367/Put/seqid=0 2024-12-10T14:25:44,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:44,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840804387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:44,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:44,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840804389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:44,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741932_1108 (size=12301) 2024-12-10T14:25:44,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/bbaa34821b70496f93800f82998cc4b2 2024-12-10T14:25:44,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/0bcb75eff2234e7796f7410122c45934 is 50, key is test_row_0/B:col10/1733840744367/Put/seqid=0 2024-12-10T14:25:44,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741933_1109 (size=12301) 2024-12-10T14:25:44,415 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/0bcb75eff2234e7796f7410122c45934 2024-12-10T14:25:44,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/315ab61c8e3e4a14b7ead00e799c738c is 50, key is test_row_0/C:col10/1733840744367/Put/seqid=0 2024-12-10T14:25:44,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741934_1110 (size=12301) 2024-12-10T14:25:44,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:44,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44304 deadline: 1733840804488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:44,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:44,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44280 deadline: 1733840804488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:44,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:44,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840804491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:44,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:44,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840804492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:44,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:44,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840804494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:44,599 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09ed28bb to 127.0.0.1:58494 2024-12-10T14:25:44,599 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:25:44,601 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12a1285d to 127.0.0.1:58494 2024-12-10T14:25:44,601 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:25:44,602 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x47fe2fa7 to 127.0.0.1:58494 2024-12-10T14:25:44,602 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:25:44,604 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x353bc462 to 127.0.0.1:58494 2024-12-10T14:25:44,604 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:25:44,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-10T14:25:44,615 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-10T14:25:44,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:44,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44294 deadline: 1733840804692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:44,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:44,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44314 deadline: 1733840804695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:44,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:44,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44298 deadline: 1733840804698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:44,829 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/315ab61c8e3e4a14b7ead00e799c738c 2024-12-10T14:25:44,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/bbaa34821b70496f93800f82998cc4b2 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/bbaa34821b70496f93800f82998cc4b2 2024-12-10T14:25:44,840 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/bbaa34821b70496f93800f82998cc4b2, entries=150, sequenceid=455, filesize=12.0 K 2024-12-10T14:25:44,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/0bcb75eff2234e7796f7410122c45934 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0bcb75eff2234e7796f7410122c45934 2024-12-10T14:25:44,845 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0bcb75eff2234e7796f7410122c45934, entries=150, sequenceid=455, filesize=12.0 K 2024-12-10T14:25:44,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/315ab61c8e3e4a14b7ead00e799c738c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/315ab61c8e3e4a14b7ead00e799c738c 2024-12-10T14:25:44,850 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/315ab61c8e3e4a14b7ead00e799c738c, entries=150, sequenceid=455, filesize=12.0 K 2024-12-10T14:25:44,851 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 3cb281b62d072b2e7312c326c99dffff in 483ms, sequenceid=455, compaction requested=true 2024-12-10T14:25:44,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:44,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:25:44,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:44,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:25:44,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:44,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3cb281b62d072b2e7312c326c99dffff:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:25:44,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:44,851 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:44,851 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:44,852 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:44,852 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:44,852 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/A is initiating minor compaction (all files) 2024-12-10T14:25:44,852 DEBUG 
[RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/B is initiating minor compaction (all files) 2024-12-10T14:25:44,853 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/B in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:44,853 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/A in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:44,853 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/95388d76d81a4d0c80cd3a5c34ed3084, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/9933e884dc6e4b5297d5afcefee3a601, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0bcb75eff2234e7796f7410122c45934] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=37.0 K 2024-12-10T14:25:44,853 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/c07c1afb559f4be79211c8fe07f0f1fb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/5acc92cd83cb41dbb150477a06cdba20, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/bbaa34821b70496f93800f82998cc4b2] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=37.0 K 2024-12-10T14:25:44,853 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 95388d76d81a4d0c80cd3a5c34ed3084, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1733840741206 2024-12-10T14:25:44,853 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting c07c1afb559f4be79211c8fe07f0f1fb, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1733840741206 2024-12-10T14:25:44,853 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 9933e884dc6e4b5297d5afcefee3a601, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1733840742348 2024-12-10T14:25:44,854 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5acc92cd83cb41dbb150477a06cdba20, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1733840742348 2024-12-10T14:25:44,854 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 
0bcb75eff2234e7796f7410122c45934, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1733840743751 2024-12-10T14:25:44,854 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbaa34821b70496f93800f82998cc4b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1733840743751 2024-12-10T14:25:44,862 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#B#compaction#96 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:44,863 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#A#compaction#97 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:44,863 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/cace6bcf58ef4f73b26246afcb07f937 is 50, key is test_row_0/B:col10/1733840744367/Put/seqid=0 2024-12-10T14:25:44,864 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/e550f6430d074bd48b8f23c590c908be is 50, key is test_row_0/A:col10/1733840744367/Put/seqid=0 2024-12-10T14:25:44,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741935_1111 (size=13391) 2024-12-10T14:25:44,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741936_1112 (size=13391) 2024-12-10T14:25:44,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:44,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T14:25:44,997 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18cb251d to 127.0.0.1:58494 2024-12-10T14:25:44,997 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:25:44,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:44,998 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f2091cc to 127.0.0.1:58494 2024-12-10T14:25:44,998 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:25:44,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:44,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:44,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, 
new segment=null 2024-12-10T14:25:44,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:44,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:45,001 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x45b55c24 to 127.0.0.1:58494 2024-12-10T14:25:45,001 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:25:45,003 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/6d9d38634ead405c8e060102ff6b8c67 is 50, key is test_row_0/A:col10/1733840744385/Put/seqid=0 2024-12-10T14:25:45,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741937_1113 (size=12301) 2024-12-10T14:25:45,274 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/e550f6430d074bd48b8f23c590c908be as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/e550f6430d074bd48b8f23c590c908be 2024-12-10T14:25:45,275 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/cace6bcf58ef4f73b26246afcb07f937 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/cace6bcf58ef4f73b26246afcb07f937 2024-12-10T14:25:45,279 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/A of 3cb281b62d072b2e7312c326c99dffff into e550f6430d074bd48b8f23c590c908be(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:25:45,279 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:45,279 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/A, priority=13, startTime=1733840744851; duration=0sec 2024-12-10T14:25:45,279 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:45,279 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/B of 3cb281b62d072b2e7312c326c99dffff into cace6bcf58ef4f73b26246afcb07f937(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:25:45,279 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:A 2024-12-10T14:25:45,279 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:45,279 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/B, priority=13, startTime=1733840744851; duration=0sec 2024-12-10T14:25:45,280 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:45,280 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:45,280 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:B 2024-12-10T14:25:45,281 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:45,281 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 3cb281b62d072b2e7312c326c99dffff/C is initiating minor compaction (all files) 2024-12-10T14:25:45,281 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3cb281b62d072b2e7312c326c99dffff/C in TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
2024-12-10T14:25:45,281 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/6834a715e481499f93f3795cedb2d804, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/d345eea15d814998ad79f83a951bf5ea, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/315ab61c8e3e4a14b7ead00e799c738c] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp, totalSize=37.0 K 2024-12-10T14:25:45,281 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6834a715e481499f93f3795cedb2d804, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1733840741206 2024-12-10T14:25:45,282 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting d345eea15d814998ad79f83a951bf5ea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1733840742348 2024-12-10T14:25:45,282 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 315ab61c8e3e4a14b7ead00e799c738c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=455, earliestPutTs=1733840743751 2024-12-10T14:25:45,289 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3cb281b62d072b2e7312c326c99dffff#C#compaction#99 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:45,289 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/d980486fde14484ba1dc0706df54e912 is 50, key is test_row_0/C:col10/1733840744367/Put/seqid=0 2024-12-10T14:25:45,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741938_1114 (size=13391) 2024-12-10T14:25:45,408 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=478 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/6d9d38634ead405c8e060102ff6b8c67 2024-12-10T14:25:45,417 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/b28fc33dcdeb4f1ab588957cf0cd3a9b is 50, key is test_row_0/B:col10/1733840744385/Put/seqid=0 2024-12-10T14:25:45,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741939_1115 (size=12301) 2024-12-10T14:25:45,698 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/d980486fde14484ba1dc0706df54e912 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/d980486fde14484ba1dc0706df54e912 2024-12-10T14:25:45,704 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3cb281b62d072b2e7312c326c99dffff/C of 3cb281b62d072b2e7312c326c99dffff into d980486fde14484ba1dc0706df54e912(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:25:45,704 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:45,704 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff., storeName=3cb281b62d072b2e7312c326c99dffff/C, priority=13, startTime=1733840744851; duration=0sec 2024-12-10T14:25:45,704 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:45,704 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3cb281b62d072b2e7312c326c99dffff:C 2024-12-10T14:25:45,822 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=478 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/b28fc33dcdeb4f1ab588957cf0cd3a9b 2024-12-10T14:25:45,829 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/b5d028629f2540218b887030f88a7cce is 50, key is test_row_0/C:col10/1733840744385/Put/seqid=0 2024-12-10T14:25:45,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741940_1116 (size=12301) 2024-12-10T14:25:46,177 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-10T14:25:46,234 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=478 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/b5d028629f2540218b887030f88a7cce 2024-12-10T14:25:46,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/6d9d38634ead405c8e060102ff6b8c67 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/6d9d38634ead405c8e060102ff6b8c67 2024-12-10T14:25:46,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/6d9d38634ead405c8e060102ff6b8c67, entries=150, sequenceid=478, filesize=12.0 K 2024-12-10T14:25:46,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/b28fc33dcdeb4f1ab588957cf0cd3a9b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b28fc33dcdeb4f1ab588957cf0cd3a9b 2024-12-10T14:25:46,248 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b28fc33dcdeb4f1ab588957cf0cd3a9b, entries=150, sequenceid=478, filesize=12.0 K 2024-12-10T14:25:46,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/b5d028629f2540218b887030f88a7cce as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/b5d028629f2540218b887030f88a7cce 2024-12-10T14:25:46,253 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/b5d028629f2540218b887030f88a7cce, entries=150, sequenceid=478, filesize=12.0 K 2024-12-10T14:25:46,254 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=6.71 KB/6870 for 3cb281b62d072b2e7312c326c99dffff in 1257ms, sequenceid=478, compaction requested=false 2024-12-10T14:25:46,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:46,492 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09bd0964 to 127.0.0.1:58494 2024-12-10T14:25:46,492 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e52b42a to 127.0.0.1:58494 2024-12-10T14:25:46,492 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-10T14:25:46,492 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:25:46,493 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-10T14:25:46,493 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 44 2024-12-10T14:25:46,493 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 131 2024-12-10T14:25:46,493 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43 2024-12-10T14:25:46,493 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 106 2024-12-10T14:25:46,493 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57 2024-12-10T14:25:46,493 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-10T14:25:46,493 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6872 2024-12-10T14:25:46,493 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6616 2024-12-10T14:25:46,493 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-10T14:25:46,493 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2968 2024-12-10T14:25:46,493 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8903 rows 2024-12-10T14:25:46,493 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2963 2024-12-10T14:25:46,493 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8888 rows 2024-12-10T14:25:46,493 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T14:25:46,493 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6fcb5f29 to 127.0.0.1:58494 2024-12-10T14:25:46,494 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:25:46,500 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-10T14:25:46,504 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-10T14:25:46,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-10T14:25:46,511 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840746511"}]},"ts":"1733840746511"} 2024-12-10T14:25:46,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-10T14:25:46,512 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-10T14:25:46,514 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-10T14:25:46,516 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T14:25:46,520 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3cb281b62d072b2e7312c326c99dffff, UNASSIGN}] 
2024-12-10T14:25:46,521 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3cb281b62d072b2e7312c326c99dffff, UNASSIGN 2024-12-10T14:25:46,521 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=3cb281b62d072b2e7312c326c99dffff, regionState=CLOSING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:25:46,522 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:25:46,522 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; CloseRegionProcedure 3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:25:46,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-10T14:25:46,677 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:46,679 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(124): Close 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:46,679 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:25:46,680 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1681): Closing 3cb281b62d072b2e7312c326c99dffff, disabling compactions & flushes 2024-12-10T14:25:46,680 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:46,680 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 2024-12-10T14:25:46,680 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. after waiting 0 ms 2024-12-10T14:25:46,680 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
2024-12-10T14:25:46,680 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(2837): Flushing 3cb281b62d072b2e7312c326c99dffff 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-10T14:25:46,680 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=A 2024-12-10T14:25:46,680 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:46,680 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=B 2024-12-10T14:25:46,680 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:46,680 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3cb281b62d072b2e7312c326c99dffff, store=C 2024-12-10T14:25:46,681 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:46,684 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/7cda088f06d0434893593ee6f50de188 is 50, key is test_row_0/A:col10/1733840746491/Put/seqid=0 2024-12-10T14:25:46,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741941_1117 (size=9857) 2024-12-10T14:25:46,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-10T14:25:47,089 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/7cda088f06d0434893593ee6f50de188 2024-12-10T14:25:47,097 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/cad06f40860c4eb7bb74be5acde02825 is 50, key is test_row_0/B:col10/1733840746491/Put/seqid=0 2024-12-10T14:25:47,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741942_1118 (size=9857) 2024-12-10T14:25:47,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-10T14:25:47,501 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 
{event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/cad06f40860c4eb7bb74be5acde02825 2024-12-10T14:25:47,508 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/2e0b0c51482b4d49ad92157d8dbe8668 is 50, key is test_row_0/C:col10/1733840746491/Put/seqid=0 2024-12-10T14:25:47,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741943_1119 (size=9857) 2024-12-10T14:25:47,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-10T14:25:47,912 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/2e0b0c51482b4d49ad92157d8dbe8668 2024-12-10T14:25:47,917 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/A/7cda088f06d0434893593ee6f50de188 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/7cda088f06d0434893593ee6f50de188 2024-12-10T14:25:47,922 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/7cda088f06d0434893593ee6f50de188, entries=100, sequenceid=487, filesize=9.6 K 2024-12-10T14:25:47,923 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/B/cad06f40860c4eb7bb74be5acde02825 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/cad06f40860c4eb7bb74be5acde02825 2024-12-10T14:25:47,928 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/cad06f40860c4eb7bb74be5acde02825, entries=100, sequenceid=487, filesize=9.6 K 2024-12-10T14:25:47,929 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/.tmp/C/2e0b0c51482b4d49ad92157d8dbe8668 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/2e0b0c51482b4d49ad92157d8dbe8668 2024-12-10T14:25:47,933 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/2e0b0c51482b4d49ad92157d8dbe8668, entries=100, sequenceid=487, filesize=9.6 K 2024-12-10T14:25:47,934 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 3cb281b62d072b2e7312c326c99dffff in 1254ms, sequenceid=487, compaction requested=true 2024-12-10T14:25:47,934 DEBUG [StoreCloser-TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/8ee59d6064f54e828cd79a60f6d91bad, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/df8b7b5229804b6b9be472dfc5cfbcb9, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/83a91d9be4ed41ee928f346fd40ec2cc, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/1b08846939ae4fe18805bc803b564f9e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/53aae04281384014b6729d4f091e9e65, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/4fdea548b8464ccdaf71e315d9782a58, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/b67c21a9dc6d488baa67e268e2937986, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/c5f9533855704aa39c8fa31fc7d5b469, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/4e871c80de78401887ce5857a68c6f9e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/cc8c0b710ff44e51a1883116be73a74c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/c1c7b2e9a1eb46ee962f5a5310761096, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/50298a230b584fb198f5a8f2167009e6, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/a2f9d609cdb74fefb2566dc4aba5942d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/2b032448d140492ab1e986303c79cef6, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/759c557d399b48f0b7ddebc02190eb84, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/cbfd671a78524d7f8b2f11ebcb1f55ba, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/96b5cd447187452b9ce1127802604f95, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/6d06c88a831a4da485e3e7aacb6a54b4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/fd5513509ad04e3dbbb2158be3b9efec, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/0905db215254437cae9badc24347f8c5, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/a17bcae373654221b80caff61239aeae, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/71be7a02b3cd4e589d5b20fd5c971eb7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/12f9a9e03e3742b99256664c1a3d729b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/2e6e6fa389d741e59e011cb78dbb2d1b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/5e720df185764364b5e8fd4dc7a8410c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/69d8f0834b8247318b529850c524cc6e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/6a10b03cb35849b6b32bc9f7434dd515, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/48889bff4ec34a658cbaa3dbbb69647e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/45797bd1d0284db5b733a22cbc2122b3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/c07c1afb559f4be79211c8fe07f0f1fb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/5acc92cd83cb41dbb150477a06cdba20, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/bbaa34821b70496f93800f82998cc4b2] to archive 2024-12-10T14:25:47,938 DEBUG [StoreCloser-TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T14:25:47,946 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/c5f9533855704aa39c8fa31fc7d5b469 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/c5f9533855704aa39c8fa31fc7d5b469 2024-12-10T14:25:47,946 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/53aae04281384014b6729d4f091e9e65 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/53aae04281384014b6729d4f091e9e65 2024-12-10T14:25:47,946 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/1b08846939ae4fe18805bc803b564f9e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/1b08846939ae4fe18805bc803b564f9e 2024-12-10T14:25:47,946 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/df8b7b5229804b6b9be472dfc5cfbcb9 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/df8b7b5229804b6b9be472dfc5cfbcb9 2024-12-10T14:25:47,947 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/b67c21a9dc6d488baa67e268e2937986 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/b67c21a9dc6d488baa67e268e2937986 2024-12-10T14:25:47,947 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/4fdea548b8464ccdaf71e315d9782a58 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/4fdea548b8464ccdaf71e315d9782a58 2024-12-10T14:25:47,947 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/8ee59d6064f54e828cd79a60f6d91bad to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/8ee59d6064f54e828cd79a60f6d91bad 2024-12-10T14:25:47,947 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/83a91d9be4ed41ee928f346fd40ec2cc to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/83a91d9be4ed41ee928f346fd40ec2cc 2024-12-10T14:25:47,949 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/a2f9d609cdb74fefb2566dc4aba5942d to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/a2f9d609cdb74fefb2566dc4aba5942d 2024-12-10T14:25:47,949 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/4e871c80de78401887ce5857a68c6f9e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/4e871c80de78401887ce5857a68c6f9e 2024-12-10T14:25:47,950 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/cc8c0b710ff44e51a1883116be73a74c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/cc8c0b710ff44e51a1883116be73a74c 2024-12-10T14:25:47,950 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/759c557d399b48f0b7ddebc02190eb84 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/759c557d399b48f0b7ddebc02190eb84 2024-12-10T14:25:47,950 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/50298a230b584fb198f5a8f2167009e6 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/50298a230b584fb198f5a8f2167009e6 2024-12-10T14:25:47,951 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/c1c7b2e9a1eb46ee962f5a5310761096 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/c1c7b2e9a1eb46ee962f5a5310761096 2024-12-10T14:25:47,951 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/2b032448d140492ab1e986303c79cef6 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/2b032448d140492ab1e986303c79cef6 2024-12-10T14:25:47,954 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/96b5cd447187452b9ce1127802604f95 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/96b5cd447187452b9ce1127802604f95 2024-12-10T14:25:47,954 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/fd5513509ad04e3dbbb2158be3b9efec to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/fd5513509ad04e3dbbb2158be3b9efec 2024-12-10T14:25:47,954 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/a17bcae373654221b80caff61239aeae to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/a17bcae373654221b80caff61239aeae 2024-12-10T14:25:47,954 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/cbfd671a78524d7f8b2f11ebcb1f55ba to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/cbfd671a78524d7f8b2f11ebcb1f55ba 2024-12-10T14:25:47,954 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/0905db215254437cae9badc24347f8c5 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/0905db215254437cae9badc24347f8c5 2024-12-10T14:25:47,955 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/71be7a02b3cd4e589d5b20fd5c971eb7 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/71be7a02b3cd4e589d5b20fd5c971eb7 2024-12-10T14:25:47,955 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/12f9a9e03e3742b99256664c1a3d729b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/12f9a9e03e3742b99256664c1a3d729b 2024-12-10T14:25:47,957 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/2e6e6fa389d741e59e011cb78dbb2d1b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/2e6e6fa389d741e59e011cb78dbb2d1b 2024-12-10T14:25:47,957 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/69d8f0834b8247318b529850c524cc6e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/69d8f0834b8247318b529850c524cc6e 2024-12-10T14:25:47,957 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/5e720df185764364b5e8fd4dc7a8410c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/5e720df185764364b5e8fd4dc7a8410c 2024-12-10T14:25:47,957 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/48889bff4ec34a658cbaa3dbbb69647e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/48889bff4ec34a658cbaa3dbbb69647e 2024-12-10T14:25:47,958 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/6a10b03cb35849b6b32bc9f7434dd515 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/6a10b03cb35849b6b32bc9f7434dd515 2024-12-10T14:25:47,958 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/6d06c88a831a4da485e3e7aacb6a54b4 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/6d06c88a831a4da485e3e7aacb6a54b4 2024-12-10T14:25:47,958 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/45797bd1d0284db5b733a22cbc2122b3 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/45797bd1d0284db5b733a22cbc2122b3 2024-12-10T14:25:47,958 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/c07c1afb559f4be79211c8fe07f0f1fb to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/c07c1afb559f4be79211c8fe07f0f1fb 2024-12-10T14:25:47,959 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/5acc92cd83cb41dbb150477a06cdba20 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/5acc92cd83cb41dbb150477a06cdba20 2024-12-10T14:25:47,959 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/bbaa34821b70496f93800f82998cc4b2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/bbaa34821b70496f93800f82998cc4b2 2024-12-10T14:25:47,973 DEBUG [StoreCloser-TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/e8ba0b70cd4a4850911a78788d98007b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b34d8a68242b4da4b58fcfb81f853cd7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/4963e235156442968bc748378b051b57, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0937a42d2a834c75a571f23066425f78, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/eb970e9a17e24911acd6d9772d8e0025, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/dd060bf6a1b04479847bd0cf05b829e4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/3dcf522234b9418a9ebf6914804bc5e7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b2afd298e90b4f8a9803993d35a7d286, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/84a1a97fb2354c28adff0eabe74a4bb1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/9e95cb791ff54c66b9b2b5c5433d5f6f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/1313997576f64cbba1b9bd6b9fe69e0e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/377aeea3def04c7386f03d34e8b83134, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b09d8b8355db4b5bbb52e6670c6d48ed, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/1780548396a24aa69b9d4ecd139a0ec2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/7b60d09a503a4ea8a67b39cd499980cf, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/ac10ea0378024dfbabdaeb67eae51c99, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/bf440981ccb34b7097205797c9ea5383, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/bd87b6023c834aea9e221370411545f9, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b142622386af4bd3b5eebfcd91b7d6df, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/f2bddac2595840ce8d20c5d3be04fe69, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/392847ade0404aa2968c732e3eaece1f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0a211c24acc54630800e3044990f338e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/e20f84e12cad4388843c5d35f332ea8f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/af7d12f2d56846e380c72488677dff2a, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/d82e08461d5b48edaac427bfb0c406cb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/91e6db4c26314a7bbeee533fec5e82ce, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/67f16c6230764b67b5fae9fca9acc6e1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/a084b50270dd43478507c3293caeaa8f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/95388d76d81a4d0c80cd3a5c34ed3084, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/c2f8b56f978f44598bf2da387db982e8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/9933e884dc6e4b5297d5afcefee3a601, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0bcb75eff2234e7796f7410122c45934] to archive 2024-12-10T14:25:47,974 DEBUG [StoreCloser-TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T14:25:47,976 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/e8ba0b70cd4a4850911a78788d98007b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/e8ba0b70cd4a4850911a78788d98007b 2024-12-10T14:25:47,977 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/eb970e9a17e24911acd6d9772d8e0025 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/eb970e9a17e24911acd6d9772d8e0025 2024-12-10T14:25:47,977 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/4963e235156442968bc748378b051b57 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/4963e235156442968bc748378b051b57 2024-12-10T14:25:47,977 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0937a42d2a834c75a571f23066425f78 to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0937a42d2a834c75a571f23066425f78 2024-12-10T14:25:47,977 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/dd060bf6a1b04479847bd0cf05b829e4 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/dd060bf6a1b04479847bd0cf05b829e4 2024-12-10T14:25:47,977 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/3dcf522234b9418a9ebf6914804bc5e7 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/3dcf522234b9418a9ebf6914804bc5e7 2024-12-10T14:25:47,977 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b34d8a68242b4da4b58fcfb81f853cd7 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b34d8a68242b4da4b58fcfb81f853cd7 2024-12-10T14:25:47,978 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b2afd298e90b4f8a9803993d35a7d286 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b2afd298e90b4f8a9803993d35a7d286 2024-12-10T14:25:47,979 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/84a1a97fb2354c28adff0eabe74a4bb1 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/84a1a97fb2354c28adff0eabe74a4bb1 2024-12-10T14:25:47,979 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/9e95cb791ff54c66b9b2b5c5433d5f6f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/9e95cb791ff54c66b9b2b5c5433d5f6f 2024-12-10T14:25:47,979 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/1313997576f64cbba1b9bd6b9fe69e0e to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/1313997576f64cbba1b9bd6b9fe69e0e 2024-12-10T14:25:47,979 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/1780548396a24aa69b9d4ecd139a0ec2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/1780548396a24aa69b9d4ecd139a0ec2 2024-12-10T14:25:47,980 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/377aeea3def04c7386f03d34e8b83134 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/377aeea3def04c7386f03d34e8b83134 2024-12-10T14:25:47,980 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b09d8b8355db4b5bbb52e6670c6d48ed to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b09d8b8355db4b5bbb52e6670c6d48ed 2024-12-10T14:25:47,980 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/7b60d09a503a4ea8a67b39cd499980cf to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/7b60d09a503a4ea8a67b39cd499980cf 2024-12-10T14:25:47,980 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/ac10ea0378024dfbabdaeb67eae51c99 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/ac10ea0378024dfbabdaeb67eae51c99 2024-12-10T14:25:47,982 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/bf440981ccb34b7097205797c9ea5383 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/bf440981ccb34b7097205797c9ea5383 2024-12-10T14:25:47,982 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/bd87b6023c834aea9e221370411545f9 to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/bd87b6023c834aea9e221370411545f9 2024-12-10T14:25:47,982 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b142622386af4bd3b5eebfcd91b7d6df to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b142622386af4bd3b5eebfcd91b7d6df 2024-12-10T14:25:47,982 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/f2bddac2595840ce8d20c5d3be04fe69 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/f2bddac2595840ce8d20c5d3be04fe69 2024-12-10T14:25:47,982 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/392847ade0404aa2968c732e3eaece1f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/392847ade0404aa2968c732e3eaece1f 2024-12-10T14:25:47,983 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0a211c24acc54630800e3044990f338e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0a211c24acc54630800e3044990f338e 2024-12-10T14:25:47,983 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/af7d12f2d56846e380c72488677dff2a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/af7d12f2d56846e380c72488677dff2a 2024-12-10T14:25:47,983 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/e20f84e12cad4388843c5d35f332ea8f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/e20f84e12cad4388843c5d35f332ea8f 2024-12-10T14:25:47,984 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/91e6db4c26314a7bbeee533fec5e82ce to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/91e6db4c26314a7bbeee533fec5e82ce 2024-12-10T14:25:47,985 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/d82e08461d5b48edaac427bfb0c406cb to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/d82e08461d5b48edaac427bfb0c406cb 2024-12-10T14:25:47,985 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/95388d76d81a4d0c80cd3a5c34ed3084 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/95388d76d81a4d0c80cd3a5c34ed3084 2024-12-10T14:25:47,985 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/c2f8b56f978f44598bf2da387db982e8 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/c2f8b56f978f44598bf2da387db982e8 2024-12-10T14:25:47,985 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/9933e884dc6e4b5297d5afcefee3a601 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/9933e884dc6e4b5297d5afcefee3a601 2024-12-10T14:25:47,985 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0bcb75eff2234e7796f7410122c45934 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/0bcb75eff2234e7796f7410122c45934 2024-12-10T14:25:47,985 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/a084b50270dd43478507c3293caeaa8f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/a084b50270dd43478507c3293caeaa8f 2024-12-10T14:25:47,986 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/67f16c6230764b67b5fae9fca9acc6e1 to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/67f16c6230764b67b5fae9fca9acc6e1 2024-12-10T14:25:47,987 DEBUG [StoreCloser-TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/36f2a51773314d84a399522e2f680fd2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/f88802fa4f9647cc9b847d5d117165ec, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/6da94d21da5247bb97b3d393c614dcc9, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/822b8114acb5401bbcff8c9b49648d89, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/4afcb3589c704fc08f8808fa91241566, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/75d45f2ca3524098b9941df42b62eed7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/e53381c4bba4497dbda1bb7a5fce2294, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/8185a2c5a98c4eac86e3253711af5fa3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/04a17f072bc54e568921782e61228eb7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/285590ad83964ec98bf8b9c09bb5a70e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/e5169fb015304f1d9b74aed33ec15ea8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/120ae275a2cd44f3b59c414329e3b445, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/01251027cd8748959a0fa6beb01e2753, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/0dbec229c8f74ea0b60f65e3e9e535b4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/22a6869916a940c9a610df976d74344e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/5e3207c903e04b49ab003b5d14c99aa8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/254eca82a47f471daa6dff57316b3f09, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/f225c747f7f84603a71be7073cdd9d38, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/49bb266da6f94d2987e349719779e38a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/c0096be60c894b2ea0aedb1ff54f195a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/3dff2e8c5e294a319d41aae919ef6241, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/43c088657aeb4a3dbf9d8a63faad61c7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/5d6a2825558549a197cf9badac03a139, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/5b049c328a614c0aba101ff5c279ffa4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/1008ddc06a5f4a189d78867697a3cfc0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/a7c0e22acdb443cab9639e9be710e510, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/1a5524d8129b4673a217d15d9da88b90, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/b1d46f1152d3426dacd95769d0fafea0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/6834a715e481499f93f3795cedb2d804, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/693fe590253e4e339227c050d76301b4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/d345eea15d814998ad79f83a951bf5ea, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/315ab61c8e3e4a14b7ead00e799c738c] to archive 2024-12-10T14:25:47,988 DEBUG [StoreCloser-TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T14:25:47,991 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/36f2a51773314d84a399522e2f680fd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/36f2a51773314d84a399522e2f680fd2 2024-12-10T14:25:47,991 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/75d45f2ca3524098b9941df42b62eed7 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/75d45f2ca3524098b9941df42b62eed7 2024-12-10T14:25:47,991 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/f88802fa4f9647cc9b847d5d117165ec to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/f88802fa4f9647cc9b847d5d117165ec 2024-12-10T14:25:47,991 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/6da94d21da5247bb97b3d393c614dcc9 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/6da94d21da5247bb97b3d393c614dcc9 2024-12-10T14:25:47,991 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/e53381c4bba4497dbda1bb7a5fce2294 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/e53381c4bba4497dbda1bb7a5fce2294 2024-12-10T14:25:47,992 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/4afcb3589c704fc08f8808fa91241566 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/4afcb3589c704fc08f8808fa91241566 2024-12-10T14:25:47,992 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/8185a2c5a98c4eac86e3253711af5fa3 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/8185a2c5a98c4eac86e3253711af5fa3 2024-12-10T14:25:47,992 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/822b8114acb5401bbcff8c9b49648d89 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/822b8114acb5401bbcff8c9b49648d89 2024-12-10T14:25:47,993 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/e5169fb015304f1d9b74aed33ec15ea8 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/e5169fb015304f1d9b74aed33ec15ea8 2024-12-10T14:25:47,993 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/0dbec229c8f74ea0b60f65e3e9e535b4 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/0dbec229c8f74ea0b60f65e3e9e535b4 2024-12-10T14:25:47,994 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/04a17f072bc54e568921782e61228eb7 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/04a17f072bc54e568921782e61228eb7 2024-12-10T14:25:47,994 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/22a6869916a940c9a610df976d74344e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/22a6869916a940c9a610df976d74344e 2024-12-10T14:25:47,994 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/01251027cd8748959a0fa6beb01e2753 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/01251027cd8748959a0fa6beb01e2753 2024-12-10T14:25:47,994 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/285590ad83964ec98bf8b9c09bb5a70e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/285590ad83964ec98bf8b9c09bb5a70e 2024-12-10T14:25:47,994 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/5e3207c903e04b49ab003b5d14c99aa8 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/5e3207c903e04b49ab003b5d14c99aa8 2024-12-10T14:25:47,995 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/120ae275a2cd44f3b59c414329e3b445 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/120ae275a2cd44f3b59c414329e3b445 2024-12-10T14:25:47,996 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/49bb266da6f94d2987e349719779e38a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/49bb266da6f94d2987e349719779e38a 2024-12-10T14:25:47,996 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/254eca82a47f471daa6dff57316b3f09 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/254eca82a47f471daa6dff57316b3f09 2024-12-10T14:25:47,996 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/f225c747f7f84603a71be7073cdd9d38 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/f225c747f7f84603a71be7073cdd9d38 2024-12-10T14:25:47,997 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/c0096be60c894b2ea0aedb1ff54f195a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/c0096be60c894b2ea0aedb1ff54f195a 2024-12-10T14:25:47,997 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/3dff2e8c5e294a319d41aae919ef6241 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/3dff2e8c5e294a319d41aae919ef6241 2024-12-10T14:25:47,997 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/43c088657aeb4a3dbf9d8a63faad61c7 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/43c088657aeb4a3dbf9d8a63faad61c7 2024-12-10T14:25:47,997 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/5b049c328a614c0aba101ff5c279ffa4 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/5b049c328a614c0aba101ff5c279ffa4 2024-12-10T14:25:47,998 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/5d6a2825558549a197cf9badac03a139 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/5d6a2825558549a197cf9badac03a139 2024-12-10T14:25:47,999 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/1008ddc06a5f4a189d78867697a3cfc0 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/1008ddc06a5f4a189d78867697a3cfc0 2024-12-10T14:25:47,999 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/1a5524d8129b4673a217d15d9da88b90 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/1a5524d8129b4673a217d15d9da88b90 2024-12-10T14:25:47,999 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/b1d46f1152d3426dacd95769d0fafea0 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/b1d46f1152d3426dacd95769d0fafea0 2024-12-10T14:25:48,000 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/a7c0e22acdb443cab9639e9be710e510 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/a7c0e22acdb443cab9639e9be710e510 2024-12-10T14:25:48,000 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/6834a715e481499f93f3795cedb2d804 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/6834a715e481499f93f3795cedb2d804 2024-12-10T14:25:48,000 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/693fe590253e4e339227c050d76301b4 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/693fe590253e4e339227c050d76301b4 2024-12-10T14:25:48,000 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/315ab61c8e3e4a14b7ead00e799c738c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/315ab61c8e3e4a14b7ead00e799c738c 2024-12-10T14:25:48,000 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/d345eea15d814998ad79f83a951bf5ea to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/d345eea15d814998ad79f83a951bf5ea 2024-12-10T14:25:48,005 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/recovered.edits/490.seqid, newMaxSeqId=490, maxSeqId=1 2024-12-10T14:25:48,008 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff. 
2024-12-10T14:25:48,008 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1635): Region close journal for 3cb281b62d072b2e7312c326c99dffff: 2024-12-10T14:25:48,009 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(170): Closed 3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:48,010 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=3cb281b62d072b2e7312c326c99dffff, regionState=CLOSED 2024-12-10T14:25:48,012 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-10T14:25:48,013 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseRegionProcedure 3cb281b62d072b2e7312c326c99dffff, server=db1d50717577,46699,1733840717757 in 1.4890 sec 2024-12-10T14:25:48,014 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=31 2024-12-10T14:25:48,014 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=31, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3cb281b62d072b2e7312c326c99dffff, UNASSIGN in 1.4920 sec 2024-12-10T14:25:48,016 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-10T14:25:48,016 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5000 sec 2024-12-10T14:25:48,017 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840748017"}]},"ts":"1733840748017"} 2024-12-10T14:25:48,018 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T14:25:48,020 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-10T14:25:48,021 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5150 sec 2024-12-10T14:25:48,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-10T14:25:48,615 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-12-10T14:25:48,618 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T14:25:48,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:25:48,623 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:25:48,625 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=34, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:25:48,625 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-10T14:25:48,627 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:48,631 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/recovered.edits] 2024-12-10T14:25:48,635 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/e550f6430d074bd48b8f23c590c908be to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/e550f6430d074bd48b8f23c590c908be 2024-12-10T14:25:48,635 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/7cda088f06d0434893593ee6f50de188 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/7cda088f06d0434893593ee6f50de188 2024-12-10T14:25:48,635 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/6d9d38634ead405c8e060102ff6b8c67 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/A/6d9d38634ead405c8e060102ff6b8c67 2024-12-10T14:25:48,639 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/cad06f40860c4eb7bb74be5acde02825 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/cad06f40860c4eb7bb74be5acde02825 2024-12-10T14:25:48,639 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b28fc33dcdeb4f1ab588957cf0cd3a9b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/b28fc33dcdeb4f1ab588957cf0cd3a9b 
2024-12-10T14:25:48,639 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/cace6bcf58ef4f73b26246afcb07f937 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/B/cace6bcf58ef4f73b26246afcb07f937 2024-12-10T14:25:48,643 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/2e0b0c51482b4d49ad92157d8dbe8668 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/2e0b0c51482b4d49ad92157d8dbe8668 2024-12-10T14:25:48,643 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/d980486fde14484ba1dc0706df54e912 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/d980486fde14484ba1dc0706df54e912 2024-12-10T14:25:48,643 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/b5d028629f2540218b887030f88a7cce to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/C/b5d028629f2540218b887030f88a7cce 2024-12-10T14:25:48,646 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/recovered.edits/490.seqid to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff/recovered.edits/490.seqid 2024-12-10T14:25:48,647 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/3cb281b62d072b2e7312c326c99dffff 2024-12-10T14:25:48,647 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T14:25:48,652 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=34, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:25:48,657 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-10T14:25:48,661 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T14:25:48,694 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
2024-12-10T14:25:48,695 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=34, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:25:48,695 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-10T14:25:48,695 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733840748695"}]},"ts":"9223372036854775807"} 2024-12-10T14:25:48,698 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T14:25:48,698 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3cb281b62d072b2e7312c326c99dffff, NAME => 'TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T14:25:48,698 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-10T14:25:48,698 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733840748698"}]},"ts":"9223372036854775807"} 2024-12-10T14:25:48,700 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T14:25:48,704 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=34, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:25:48,705 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 86 msec 2024-12-10T14:25:48,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-10T14:25:48,726 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-12-10T14:25:48,740 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=247 (was 219) Potentially hanging thread: RS:0;db1d50717577:46699-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_105012982_22 at /127.0.0.1:34954 [Waiting for operation #240] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x540ea891-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1890355422_22 at /127.0.0.1:40510 [Waiting for operation #181] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/db1d50717577:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x540ea891-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/db1d50717577:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_105012982_22 at /127.0.0.1:40378 [Waiting for operation #229] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x540ea891-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x540ea891-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=461 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=222 (was 55) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2550 (was 3069) 2024-12-10T14:25:48,749 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=247, OpenFileDescriptor=461, MaxFileDescriptor=1048576, SystemLoadAverage=222, ProcessCount=11, AvailableMemoryMB=2550 2024-12-10T14:25:48,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-10T14:25:48,751 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:25:48,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T14:25:48,753 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:25:48,753 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:48,753 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 35 2024-12-10T14:25:48,753 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:25:48,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-10T14:25:48,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741944_1120 (size=963) 2024-12-10T14:25:48,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-10T14:25:49,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-10T14:25:49,162 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da 2024-12-10T14:25:49,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741945_1121 (size=53) 2024-12-10T14:25:49,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-10T14:25:49,569 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:25:49,569 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 18673fce5a633353d821462d51dbbd4b, disabling compactions & flushes 2024-12-10T14:25:49,569 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:49,569 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:49,569 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. after waiting 0 ms 2024-12-10T14:25:49,569 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:49,569 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
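[Editorial note] The create request logged at 14:25:48,751 corresponds roughly to the following HBase 2.x Admin API call. This is an illustrative sketch only (class name, connection handling, and the subset of family attributes shown are assumptions); in the test the request arrives at the master over RPC:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTable {
  // Sketch of the logged create: table-level ADAPTIVE compacting memstore plus
  // three column families A, B, C with one version each.
  public static void create(Connection connection) throws Exception {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] {"A", "B", "C"}) {
      ColumnFamilyDescriptor cf =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)        // VERSIONS => '1'
              .setBlocksize(64 * 1024)  // BLOCKSIZE => 64 KB
              .build();
      table.setColumnFamily(cf);
    }
    try (Admin admin = connection.getAdmin()) {
      admin.createTable(table.build());
    }
  }
}

Admin.createTable blocks until the CreateTableProcedure (pid=35 above) finishes, which is why the client keeps logging "Checking to see if procedure is done pid=35" until the procedure reports SUCCESS.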
2024-12-10T14:25:49,569 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:49,570 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:25:49,571 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733840749570"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733840749570"}]},"ts":"1733840749570"} 2024-12-10T14:25:49,572 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T14:25:49,573 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:25:49,573 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840749573"}]},"ts":"1733840749573"} 2024-12-10T14:25:49,574 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T14:25:49,578 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=18673fce5a633353d821462d51dbbd4b, ASSIGN}] 2024-12-10T14:25:49,578 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=18673fce5a633353d821462d51dbbd4b, ASSIGN 2024-12-10T14:25:49,579 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=18673fce5a633353d821462d51dbbd4b, ASSIGN; state=OFFLINE, location=db1d50717577,46699,1733840717757; forceNewPlan=false, retain=false 2024-12-10T14:25:49,730 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=18673fce5a633353d821462d51dbbd4b, regionState=OPENING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:25:49,731 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; OpenRegionProcedure 18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:25:49,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-10T14:25:49,882 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:49,886 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:25:49,886 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7285): Opening region: {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:25:49,886 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:49,886 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:25:49,886 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7327): checking encryption for 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:49,886 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7330): checking classloading for 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:49,888 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:49,889 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:25:49,889 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 18673fce5a633353d821462d51dbbd4b columnFamilyName A 2024-12-10T14:25:49,889 DEBUG [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:49,890 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.HStore(327): Store=18673fce5a633353d821462d51dbbd4b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:25:49,890 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:49,891 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:25:49,892 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 18673fce5a633353d821462d51dbbd4b columnFamilyName B 2024-12-10T14:25:49,892 DEBUG [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:49,892 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.HStore(327): Store=18673fce5a633353d821462d51dbbd4b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:25:49,892 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:49,893 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:25:49,893 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 18673fce5a633353d821462d51dbbd4b columnFamilyName C 2024-12-10T14:25:49,893 DEBUG [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:49,894 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.HStore(327): Store=18673fce5a633353d821462d51dbbd4b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:25:49,894 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:49,895 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:49,895 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:49,897 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T14:25:49,898 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1085): writing seq id for 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:49,899 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:25:49,900 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1102): Opened 18673fce5a633353d821462d51dbbd4b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62392374, jitterRate=-0.07028117775917053}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T14:25:49,901 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1001): Region open journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:49,901 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., pid=37, masterSystemTime=1733840749882 2024-12-10T14:25:49,903 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:49,903 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
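[Editorial note] The Store=A/B/C lines above show each family getting a CompactingMemStore with the ADAPTIVE in-memory compactor, driven here by the table-level 'hbase.hregion.compacting.memstore.type' attribute. The same behaviour can also be requested per column family; a hedged sketch (family name and standalone builder usage are assumptions for illustration):

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveFamily {
  // Per-family alternative to the table-level metadata key used by this test:
  // request ADAPTIVE in-memory compaction directly on the column family descriptor.
  public static ColumnFamilyDescriptor build() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
  }
}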
2024-12-10T14:25:49,903 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=18673fce5a633353d821462d51dbbd4b, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:25:49,906 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-10T14:25:49,906 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; OpenRegionProcedure 18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 in 173 msec 2024-12-10T14:25:49,907 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-12-10T14:25:49,907 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=18673fce5a633353d821462d51dbbd4b, ASSIGN in 328 msec 2024-12-10T14:25:49,908 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:25:49,908 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840749908"}]},"ts":"1733840749908"} 2024-12-10T14:25:49,909 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T14:25:49,911 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:25:49,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1610 sec 2024-12-10T14:25:50,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-10T14:25:50,859 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 35 completed 2024-12-10T14:25:50,861 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x350b322d to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@26401a5f 2024-12-10T14:25:50,868 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@407e6b5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:50,870 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:50,872 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47998, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:50,874 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T14:25:50,876 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54644, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T14:25:50,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-10T14:25:50,881 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:25:50,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-10T14:25:50,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741946_1122 (size=999) 2024-12-10T14:25:51,301 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-10T14:25:51,301 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-10T14:25:51,305 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T14:25:51,313 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=18673fce5a633353d821462d51dbbd4b, REOPEN/MOVE}] 2024-12-10T14:25:51,314 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=18673fce5a633353d821462d51dbbd4b, REOPEN/MOVE 2024-12-10T14:25:51,314 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=18673fce5a633353d821462d51dbbd4b, regionState=CLOSING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:25:51,315 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:25:51,315 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; CloseRegionProcedure 18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:25:51,467 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:51,467 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(124): Close 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:51,467 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:25:51,467 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1681): Closing 18673fce5a633353d821462d51dbbd4b, disabling compactions & flushes 2024-12-10T14:25:51,467 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:51,467 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:51,467 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. after waiting 0 ms 2024-12-10T14:25:51,467 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
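[Editorial note] The modify request logged at 14:25:50,881 switches family A to a MOB family with a 4-byte threshold while leaving B and C unchanged, which is what forces the ModifyTableProcedure and the region reopen traced here. A sketch of the equivalent Admin API call (class name and error handling are assumptions for illustration):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  // Turn family A into a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4').
  public static void modify(Admin admin) throws Exception {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(name);
    ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
    ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
        .setMobEnabled(true)   // IS_MOB => 'true'
        .setMobThreshold(4L)   // MOB_THRESHOLD => '4'
        .build();
    TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(mobA)
        .build();
    admin.modifyTable(modified); // triggers ModifyTableProcedure + region reopen as logged
  }
}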
2024-12-10T14:25:51,472 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-10T14:25:51,472 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:51,472 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1635): Region close journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:51,472 WARN [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegionServer(3786): Not adding moved region record: 18673fce5a633353d821462d51dbbd4b to self. 2024-12-10T14:25:51,474 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(170): Closed 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:51,474 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=18673fce5a633353d821462d51dbbd4b, regionState=CLOSED 2024-12-10T14:25:51,477 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-12-10T14:25:51,477 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; CloseRegionProcedure 18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 in 160 msec 2024-12-10T14:25:51,477 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=18673fce5a633353d821462d51dbbd4b, REOPEN/MOVE; state=CLOSED, location=db1d50717577,46699,1733840717757; forceNewPlan=false, retain=true 2024-12-10T14:25:51,628 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=18673fce5a633353d821462d51dbbd4b, regionState=OPENING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:25:51,629 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE; OpenRegionProcedure 18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:25:51,781 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:51,784 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:25:51,784 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7285): Opening region: {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:25:51,784 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:51,785 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:25:51,785 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7327): checking encryption for 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:51,785 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7330): checking classloading for 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:51,787 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:51,788 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:25:51,792 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 18673fce5a633353d821462d51dbbd4b columnFamilyName A 2024-12-10T14:25:51,794 DEBUG [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:51,795 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.HStore(327): Store=18673fce5a633353d821462d51dbbd4b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:25:51,795 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:51,796 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:25:51,796 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 18673fce5a633353d821462d51dbbd4b columnFamilyName B 2024-12-10T14:25:51,796 DEBUG [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:51,796 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.HStore(327): Store=18673fce5a633353d821462d51dbbd4b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:25:51,797 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:51,797 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:25:51,797 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 18673fce5a633353d821462d51dbbd4b columnFamilyName C 2024-12-10T14:25:51,797 DEBUG [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:51,798 INFO [StoreOpener-18673fce5a633353d821462d51dbbd4b-1 {}] regionserver.HStore(327): Store=18673fce5a633353d821462d51dbbd4b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:25:51,798 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:51,799 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:51,800 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:51,801 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T14:25:51,803 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1085): writing seq id for 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:51,804 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1102): Opened 18673fce5a633353d821462d51dbbd4b; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73735639, jitterRate=0.0987466424703598}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T14:25:51,805 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1001): Region open journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:51,806 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., pid=42, masterSystemTime=1733840751781 2024-12-10T14:25:51,807 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:51,807 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
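[Editorial note] Each "Connect 0x... to 127.0.0.1:58494" / AbstractRpcClient pair in the lines that follow is the test opening another client Connection, presumably one per writer or reader thread in the upcoming atomicity workload. A minimal sketch of what one such client looks like; the helper class and the single-row write are assumptions, though the row key test_row_0 and column A:col10 do appear later in this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriterConnection {
  // One client connection per worker thread; each ReadOnlyZKClient/AbstractRpcClient
  // pair in the log below corresponds to one of these being created.
  public static void writeOneRow(Configuration conf) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put);
    }
  }
}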
2024-12-10T14:25:51,808 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=18673fce5a633353d821462d51dbbd4b, regionState=OPEN, openSeqNum=5, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:25:51,810 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=40 2024-12-10T14:25:51,810 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=40, state=SUCCESS; OpenRegionProcedure 18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 in 180 msec 2024-12-10T14:25:51,812 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-12-10T14:25:51,812 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=18673fce5a633353d821462d51dbbd4b, REOPEN/MOVE in 497 msec 2024-12-10T14:25:51,814 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-12-10T14:25:51,814 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 509 msec 2024-12-10T14:25:51,817 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 932 msec 2024-12-10T14:25:51,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-10T14:25:51,825 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7bad2e85 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c820ef9 2024-12-10T14:25:51,832 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b55744e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:51,833 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x328f994d to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e3a4420 2024-12-10T14:25:51,836 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@454f1431, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:51,837 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x19a533a3 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@42e904d8 2024-12-10T14:25:51,842 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@505d5ccd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:51,844 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x465dc764 to 
127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a4c53ed 2024-12-10T14:25:51,847 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cb464a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:51,848 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x247c0c93 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22e911df 2024-12-10T14:25:51,851 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78cafade, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:51,852 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3b727d6e to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14c16cd4 2024-12-10T14:25:51,855 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a52344f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:51,856 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c7940d9 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@341384e 2024-12-10T14:25:51,861 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8ba8425, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:51,862 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c38ee58 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@26b120d9 2024-12-10T14:25:51,864 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7af61386, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:51,865 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x630684bf to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c1ec7ee 2024-12-10T14:25:51,868 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63e87c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:25:51,873 DEBUG 
[hconnection-0x2c14e149-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:51,874 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:25:51,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-12-10T14:25:51,875 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:25:51,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-10T14:25:51,876 DEBUG [hconnection-0x28f46626-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:51,876 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:25:51,876 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:25:51,877 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48026, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:51,877 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48014, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:51,878 DEBUG [hconnection-0x89c13fb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:51,878 DEBUG [hconnection-0x1978602e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:51,879 DEBUG [hconnection-0x319be96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:51,879 DEBUG [hconnection-0x4ce04f8e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:51,880 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48034, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:51,880 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48054, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:51,880 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48046, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:51,880 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48070, version=2.7.0-SNAPSHOT, sasl=false, 
ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:51,881 DEBUG [hconnection-0x7ee0707e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:51,882 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48082, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:51,886 DEBUG [hconnection-0x665ea150-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:51,887 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48094, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:51,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:51,895 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T14:25:51,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:25:51,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:51,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:25:51,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:51,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:25:51,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:51,907 DEBUG [hconnection-0x3a54e133-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:25:51,908 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48100, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:25:51,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:51,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:51,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840811944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:51,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840811946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:51,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:51,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840811950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:51,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:51,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840811950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:51,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:51,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840811950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:51,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121041c8c2ad5c3e429fa28003e9c147bbc9_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840751888/Put/seqid=0 2024-12-10T14:25:51,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-10T14:25:51,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741947_1123 (size=12154) 2024-12-10T14:25:52,027 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:52,028 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-10T14:25:52,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:52,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
as already flushing 2024-12-10T14:25:52,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:52,028 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:52,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:52,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:52,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:52,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:52,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840812052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:52,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840812052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:52,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:52,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840812055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:52,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:52,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840812055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:52,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:52,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840812056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:52,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-10T14:25:52,181 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:52,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-10T14:25:52,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:52,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:52,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:52,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:52,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:52,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:52,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:52,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:52,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840812257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:52,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840812257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:52,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:52,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840812258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:52,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:52,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840812259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:52,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:52,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840812260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:52,335 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:52,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-10T14:25:52,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:52,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:52,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:52,336 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:52,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:52,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:52,381 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:52,386 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121041c8c2ad5c3e429fa28003e9c147bbc9_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121041c8c2ad5c3e429fa28003e9c147bbc9_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:52,388 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/1e07dce988f8442c8f98706064270301, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:52,397 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/1e07dce988f8442c8f98706064270301 is 175, key is test_row_0/A:col10/1733840751888/Put/seqid=0 2024-12-10T14:25:52,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741948_1124 (size=30955) 2024-12-10T14:25:52,421 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/1e07dce988f8442c8f98706064270301 2024-12-10T14:25:52,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/c6c5da171a7b440090b68a43ef370e90 is 50, key is test_row_0/B:col10/1733840751888/Put/seqid=0 2024-12-10T14:25:52,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741949_1125 (size=12001) 2024-12-10T14:25:52,472 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/c6c5da171a7b440090b68a43ef370e90 2024-12-10T14:25:52,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-10T14:25:52,488 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:52,488 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-10T14:25:52,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:52,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:52,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:52,489 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:52,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:52,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:25:52,512 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/95c85aa3894449968fd51cf01876f6e7 is 50, key is test_row_0/C:col10/1733840751888/Put/seqid=0 2024-12-10T14:25:52,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741950_1126 (size=12001) 2024-12-10T14:25:52,531 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/95c85aa3894449968fd51cf01876f6e7 2024-12-10T14:25:52,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/1e07dce988f8442c8f98706064270301 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1e07dce988f8442c8f98706064270301 2024-12-10T14:25:52,542 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1e07dce988f8442c8f98706064270301, entries=150, sequenceid=16, filesize=30.2 K 2024-12-10T14:25:52,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/c6c5da171a7b440090b68a43ef370e90 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/c6c5da171a7b440090b68a43ef370e90 2024-12-10T14:25:52,561 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/c6c5da171a7b440090b68a43ef370e90, entries=150, sequenceid=16, filesize=11.7 K 2024-12-10T14:25:52,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/95c85aa3894449968fd51cf01876f6e7 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/95c85aa3894449968fd51cf01876f6e7 2024-12-10T14:25:52,567 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:52,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840812563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:52,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:52,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840812564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:52,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:52,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840812565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:52,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:52,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840812567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:52,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:52,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840812567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:52,570 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/95c85aa3894449968fd51cf01876f6e7, entries=150, sequenceid=16, filesize=11.7 K 2024-12-10T14:25:52,571 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 18673fce5a633353d821462d51dbbd4b in 676ms, sequenceid=16, compaction requested=false 2024-12-10T14:25:52,571 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-10T14:25:52,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:52,641 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:52,641 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-10T14:25:52,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
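The repeated RegionTooBusyException warnings above all originate in HRegion.checkResources(): while the region's memstore is over its blocking limit (512.0 K in this run), incoming mutations are rejected until the in-flight flush drains it. That blocking limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the small 512 K value presumably reflects a deliberately tiny flush size configured for this test. A minimal client-side sketch of the retry semantics is shown below; the table name, row, column, and backoff values are illustrative assumptions rather than anything taken from this run, and the stock HBase client normally performs this retry internally (the exception may surface wrapped in a retries-exhausted error rather than being thrown directly as written here).

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // Row/column chosen to mirror the test data in the log; values are placeholders.
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;                 // illustrative starting backoff, not from the test
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put);               // server-side checkResources may reject the write
                        break;                        // accepted once the memstore drops below the limit
                    } catch (RegionTooBusyException busy) {
                        // Region is above its memstore blocking limit; wait for the flush to finish.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;               // simple exponential backoff between attempts
                    }
                }
            }
        }
    }

Raising the flush size or the block multiplier, or letting flushes complete faster, reduces how often checkResources rejects writes; in this log the blocked puts coincide with the pid=44 flush of stores A, B and C that begins just below.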
2024-12-10T14:25:52,642 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T14:25:52,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:25:52,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:52,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:25:52,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:52,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:25:52,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:52,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412109d0125ea33664f66838dec3ba9041d97_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840751945/Put/seqid=0 2024-12-10T14:25:52,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741951_1127 (size=12154) 2024-12-10T14:25:52,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:52,682 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412109d0125ea33664f66838dec3ba9041d97_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412109d0125ea33664f66838dec3ba9041d97_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:52,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/932424f52f3b42a79127d0562621a038, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:52,695 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/932424f52f3b42a79127d0562621a038 is 175, key is test_row_0/A:col10/1733840751945/Put/seqid=0 2024-12-10T14:25:52,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741952_1128 (size=30955) 2024-12-10T14:25:52,711 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/932424f52f3b42a79127d0562621a038 2024-12-10T14:25:52,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/174897c4b9a54632acc5b63065f14b33 is 50, key is test_row_0/B:col10/1733840751945/Put/seqid=0 2024-12-10T14:25:52,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741953_1129 (size=12001) 2024-12-10T14:25:52,737 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/174897c4b9a54632acc5b63065f14b33 2024-12-10T14:25:52,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/a66147102d584e94b936c06f5834891b is 50, key is test_row_0/C:col10/1733840751945/Put/seqid=0 2024-12-10T14:25:52,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741954_1130 (size=12001) 2024-12-10T14:25:52,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-10T14:25:53,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:53,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:53,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840813078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840813078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840813078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840813079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840813080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,183 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/a66147102d584e94b936c06f5834891b 2024-12-10T14:25:53,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840813183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840813184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840813184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840813184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840813187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/932424f52f3b42a79127d0562621a038 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/932424f52f3b42a79127d0562621a038 2024-12-10T14:25:53,195 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/932424f52f3b42a79127d0562621a038, entries=150, sequenceid=42, filesize=30.2 K 2024-12-10T14:25:53,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/174897c4b9a54632acc5b63065f14b33 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/174897c4b9a54632acc5b63065f14b33 2024-12-10T14:25:53,204 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/174897c4b9a54632acc5b63065f14b33, entries=150, sequenceid=42, filesize=11.7 K 2024-12-10T14:25:53,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/a66147102d584e94b936c06f5834891b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/a66147102d584e94b936c06f5834891b 2024-12-10T14:25:53,215 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/a66147102d584e94b936c06f5834891b, entries=150, sequenceid=42, filesize=11.7 K 2024-12-10T14:25:53,221 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 18673fce5a633353d821462d51dbbd4b in 575ms, sequenceid=42, compaction requested=false 2024-12-10T14:25:53,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:53,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:53,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-12-10T14:25:53,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-12-10T14:25:53,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-10T14:25:53,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3460 sec 2024-12-10T14:25:53,226 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 1.3500 sec 2024-12-10T14:25:53,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:53,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T14:25:53,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:25:53,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:53,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:25:53,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:53,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:25:53,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:53,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412103ccc7f4b579e4385b0b3ad00ce59abc0_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840753391/Put/seqid=0 2024-12-10T14:25:53,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840813417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840813418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840813419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840813421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840813422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741955_1131 (size=14594) 2024-12-10T14:25:53,439 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:53,445 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412103ccc7f4b579e4385b0b3ad00ce59abc0_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412103ccc7f4b579e4385b0b3ad00ce59abc0_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:53,446 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/0370378b760f40bc8676c07ff6657fd5, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:53,447 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/0370378b760f40bc8676c07ff6657fd5 is 175, key is test_row_0/A:col10/1733840753391/Put/seqid=0 2024-12-10T14:25:53,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741956_1132 (size=39549) 2024-12-10T14:25:53,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840813524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840813524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840813525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:53,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:53,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:25:53,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840813528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
2024-12-10T14:25:53,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840813529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
2024-12-10T14:25:53,543 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-12-10T14:25:53,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:25:53,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840813730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
2024-12-10T14:25:53,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:25:53,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840813730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
2024-12-10T14:25:53,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:25:53,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840813730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
2024-12-10T14:25:53,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:25:53,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840813734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
2024-12-10T14:25:53,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:25:53,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840813734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
2024-12-10T14:25:53,864 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/0370378b760f40bc8676c07ff6657fd5
2024-12-10T14:25:53,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/3c308ff6930e450894096af79085f656 is 50, key is test_row_0/B:col10/1733840753391/Put/seqid=0
2024-12-10T14:25:53,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741957_1133 (size=12001)
2024-12-10T14:25:53,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43
2024-12-10T14:25:53,981 INFO [Thread-619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed
2024-12-10T14:25:53,983 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-10T14:25:53,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees
2024-12-10T14:25:53,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45
2024-12-10T14:25:53,985 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-10T14:25:53,986 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-10T14:25:53,986 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-10T14:25:54,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:25:54,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840814032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
2024-12-10T14:25:54,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:25:54,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840814033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
2024-12-10T14:25:54,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:25:54,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840814034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
2024-12-10T14:25:54,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:25:54,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840814037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
2024-12-10T14:25:54,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:25:54,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840814039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
2024-12-10T14:25:54,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45
2024-12-10T14:25:54,137 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757
2024-12-10T14:25:54,138 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46
2024-12-10T14:25:54,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.
2024-12-10T14:25:54,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing
2024-12-10T14:25:54,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.
2024-12-10T14:25:54,138 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46
java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T14:25:54,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46
java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T14:25:54,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=46
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T14:25:54,282 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/3c308ff6930e450894096af79085f656
2024-12-10T14:25:54,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45
2024-12-10T14:25:54,292 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757
2024-12-10T14:25:54,292 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46
2024-12-10T14:25:54,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.
2024-12-10T14:25:54,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing
2024-12-10T14:25:54,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.
2024-12-10T14:25:54,293 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46
java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T14:25:54,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46
java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T14:25:54,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=46
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T14:25:54,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/46bf21c9a7aa44799ad8572464bbe367 is 50, key is test_row_0/C:col10/1733840753391/Put/seqid=0
2024-12-10T14:25:54,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741958_1134 (size=12001)
2024-12-10T14:25:54,321 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/46bf21c9a7aa44799ad8572464bbe367
2024-12-10T14:25:54,330 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/0370378b760f40bc8676c07ff6657fd5 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/0370378b760f40bc8676c07ff6657fd5
2024-12-10T14:25:54,339 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/0370378b760f40bc8676c07ff6657fd5, entries=200, sequenceid=55, filesize=38.6 K
2024-12-10T14:25:54,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/3c308ff6930e450894096af79085f656 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3c308ff6930e450894096af79085f656
2024-12-10T14:25:54,351 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3c308ff6930e450894096af79085f656, entries=150, sequenceid=55, filesize=11.7 K
2024-12-10T14:25:54,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/46bf21c9a7aa44799ad8572464bbe367 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/46bf21c9a7aa44799ad8572464bbe367
2024-12-10T14:25:54,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/46bf21c9a7aa44799ad8572464bbe367, entries=150, sequenceid=55, filesize=11.7 K
2024-12-10T14:25:54,360 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 18673fce5a633353d821462d51dbbd4b in 969ms, sequenceid=55, compaction requested=true
2024-12-10T14:25:54,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b:
2024-12-10T14:25:54,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:A, priority=-2147483648, current under compaction store size is 1
2024-12-10T14:25:54,360 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T14:25:54,360 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-10T14:25:54,360 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-10T14:25:54,361 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:B, priority=-2147483648, current under compaction store size is 2
2024-12-10T14:25:54,362 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-10T14:25:54,362 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/A is initiating minor compaction (all files)
2024-12-10T14:25:54,362 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/A in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.
2024-12-10T14:25:54,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T14:25:54,362 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1e07dce988f8442c8f98706064270301, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/932424f52f3b42a79127d0562621a038, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/0370378b760f40bc8676c07ff6657fd5] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=99.1 K
2024-12-10T14:25:54,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:C, priority=-2147483648, current under compaction store size is 3
2024-12-10T14:25:54,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-10T14:25:54,362 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.
2024-12-10T14:25:54,362 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1e07dce988f8442c8f98706064270301, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/932424f52f3b42a79127d0562621a038, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/0370378b760f40bc8676c07ff6657fd5]
2024-12-10T14:25:54,363 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e07dce988f8442c8f98706064270301, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733840751888
2024-12-10T14:25:54,364 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-10T14:25:54,364 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 932424f52f3b42a79127d0562621a038, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733840751945
2024-12-10T14:25:54,364 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/B is initiating minor compaction (all files)
2024-12-10T14:25:54,364 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/B in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.
2024-12-10T14:25:54,364 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/c6c5da171a7b440090b68a43ef370e90, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/174897c4b9a54632acc5b63065f14b33, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3c308ff6930e450894096af79085f656] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=35.2 K
2024-12-10T14:25:54,365 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0370378b760f40bc8676c07ff6657fd5, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733840753078
2024-12-10T14:25:54,365 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting c6c5da171a7b440090b68a43ef370e90, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733840751888
2024-12-10T14:25:54,365 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 174897c4b9a54632acc5b63065f14b33, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733840751945
2024-12-10T14:25:54,366 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c308ff6930e450894096af79085f656, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733840753078
2024-12-10T14:25:54,383 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b]
2024-12-10T14:25:54,391 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#B#compaction#115 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-12-10T14:25:54,392 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/9155bc4f449c4abc82ce418e38ac070f is 50, key is test_row_0/B:col10/1733840753391/Put/seqid=0
2024-12-10T14:25:54,392 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412104f4d853e60c6458b96dd996d0c196454_18673fce5a633353d821462d51dbbd4b store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b]
2024-12-10T14:25:54,396 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412104f4d853e60c6458b96dd996d0c196454_18673fce5a633353d821462d51dbbd4b, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b]
2024-12-10T14:25:54,397 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412104f4d853e60c6458b96dd996d0c196454_18673fce5a633353d821462d51dbbd4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b]
2024-12-10T14:25:54,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741960_1136 (size=4469)
2024-12-10T14:25:54,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741959_1135 (size=12104)
2024-12-10T14:25:54,425 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-10T14:25:54,426 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54650, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-12-10T14:25:54,434 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/9155bc4f449c4abc82ce418e38ac070f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/9155bc4f449c4abc82ce418e38ac070f
2024-12-10T14:25:54,444 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757
2024-12-10T14:25:54,444 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/B of 18673fce5a633353d821462d51dbbd4b into 9155bc4f449c4abc82ce418e38ac070f(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-10T14:25:54,445 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b:
2024-12-10T14:25:54,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46
2024-12-10T14:25:54,445 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/B, priority=13, startTime=1733840754360; duration=0sec
2024-12-10T14:25:54,445 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-10T14:25:54,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.
2024-12-10T14:25:54,445 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:B
2024-12-10T14:25:54,445 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB
2024-12-10T14:25:54,445 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-10T14:25:54,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A
2024-12-10T14:25:54,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T14:25:54,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B
2024-12-10T14:25:54,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T14:25:54,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C
2024-12-10T14:25:54,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T14:25:54,447 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-10T14:25:54,447 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/C is initiating minor compaction (all files)
2024-12-10T14:25:54,447 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/C in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.
2024-12-10T14:25:54,448 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/95c85aa3894449968fd51cf01876f6e7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/a66147102d584e94b936c06f5834891b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/46bf21c9a7aa44799ad8572464bbe367] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=35.2 K
2024-12-10T14:25:54,449 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 95c85aa3894449968fd51cf01876f6e7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733840751888
2024-12-10T14:25:54,449 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting a66147102d584e94b936c06f5834891b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733840751945
2024-12-10T14:25:54,451 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 46bf21c9a7aa44799ad8572464bbe367, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733840753078
2024-12-10T14:25:54,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210f8fcbe3d59c54e3bb32d5a00370c8f04_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840753420/Put/seqid=0
2024-12-10T14:25:54,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741961_1137 (size=12154)
2024-12-10T14:25:54,473 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#C#compaction#117 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-12-10T14:25:54,474 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/6fb78580c4794ad5a1c85cf9b026ca70 is 50, key is test_row_0/C:col10/1733840753391/Put/seqid=0
2024-12-10T14:25:54,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741962_1138 (size=12104)
2024-12-10T14:25:54,504 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/6fb78580c4794ad5a1c85cf9b026ca70 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/6fb78580c4794ad5a1c85cf9b026ca70
2024-12-10T14:25:54,512 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/C of 18673fce5a633353d821462d51dbbd4b into 6fb78580c4794ad5a1c85cf9b026ca70(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-10T14:25:54,512 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b:
2024-12-10T14:25:54,512 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/C, priority=13, startTime=1733840754362; duration=0sec
2024-12-10T14:25:54,512 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T14:25:54,512 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:C
2024-12-10T14:25:54,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing
2024-12-10T14:25:54,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b
2024-12-10T14:25:54,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:54,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840814552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:54,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:54,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840814553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:54,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:54,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840814553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:54,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:54,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840814553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:54,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:54,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840814554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:54,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-10T14:25:54,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:54,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840814658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:54,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:54,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840814660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:54,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:54,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840814660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:54,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:54,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840814660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:54,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:54,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840814660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:54,821 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#A#compaction#114 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:54,823 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/3c16834fe657460abef1ba5c594124da is 175, key is test_row_0/A:col10/1733840753391/Put/seqid=0 2024-12-10T14:25:54,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741963_1139 (size=31058) 2024-12-10T14:25:54,836 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/3c16834fe657460abef1ba5c594124da as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/3c16834fe657460abef1ba5c594124da 2024-12-10T14:25:54,842 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/A of 18673fce5a633353d821462d51dbbd4b into 3c16834fe657460abef1ba5c594124da(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:25:54,842 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:54,842 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/A, priority=13, startTime=1733840754360; duration=0sec 2024-12-10T14:25:54,842 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:54,842 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:A 2024-12-10T14:25:54,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:54,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840814860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:54,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:54,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840814862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:54,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:54,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840814864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:54,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:54,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840814865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:54,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:54,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840814867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:54,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:54,877 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210f8fcbe3d59c54e3bb32d5a00370c8f04_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f8fcbe3d59c54e3bb32d5a00370c8f04_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:54,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/2ffe952d943a4b9d9d95b002a7b7b657, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:54,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/2ffe952d943a4b9d9d95b002a7b7b657 is 175, key is test_row_0/A:col10/1733840753420/Put/seqid=0 2024-12-10T14:25:54,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741964_1140 (size=30955) 2024-12-10T14:25:54,886 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/2ffe952d943a4b9d9d95b002a7b7b657 2024-12-10T14:25:54,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/5344d2d423a943eb96499057ece0b868 is 50, key is test_row_0/B:col10/1733840753420/Put/seqid=0 2024-12-10T14:25:54,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741965_1141 (size=12001) 2024-12-10T14:25:54,903 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/5344d2d423a943eb96499057ece0b868 2024-12-10T14:25:54,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/e29e56c717c1471696a385b9fb99c272 is 50, key is test_row_0/C:col10/1733840753420/Put/seqid=0 2024-12-10T14:25:54,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741966_1142 (size=12001) 2024-12-10T14:25:54,947 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/e29e56c717c1471696a385b9fb99c272 2024-12-10T14:25:54,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/2ffe952d943a4b9d9d95b002a7b7b657 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/2ffe952d943a4b9d9d95b002a7b7b657 2024-12-10T14:25:54,968 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/2ffe952d943a4b9d9d95b002a7b7b657, entries=150, sequenceid=79, filesize=30.2 K 2024-12-10T14:25:54,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/5344d2d423a943eb96499057ece0b868 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/5344d2d423a943eb96499057ece0b868 2024-12-10T14:25:54,976 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/5344d2d423a943eb96499057ece0b868, entries=150, sequenceid=79, filesize=11.7 K 2024-12-10T14:25:54,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/e29e56c717c1471696a385b9fb99c272 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/e29e56c717c1471696a385b9fb99c272 2024-12-10T14:25:54,985 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/e29e56c717c1471696a385b9fb99c272, entries=150, sequenceid=79, filesize=11.7 K 2024-12-10T14:25:54,987 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 18673fce5a633353d821462d51dbbd4b in 542ms, sequenceid=79, compaction requested=false 2024-12-10T14:25:54,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:54,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:25:54,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-12-10T14:25:54,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-12-10T14:25:54,991 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-10T14:25:54,991 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0030 sec 2024-12-10T14:25:54,993 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 1.0080 sec 2024-12-10T14:25:55,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-10T14:25:55,089 INFO [Thread-619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-12-10T14:25:55,091 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:25:55,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-10T14:25:55,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T14:25:55,094 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:25:55,094 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:25:55,094 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:25:55,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:55,169 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-10T14:25:55,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:25:55,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:55,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:25:55,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-10T14:25:55,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:25:55,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:55,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412102d6086cd95cd4961842c5eb04035c8c5_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840755168/Put/seqid=0 2024-12-10T14:25:55,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840815183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840815187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840815188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840815188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840815189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T14:25:55,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741967_1143 (size=14594) 2024-12-10T14:25:55,246 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:55,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-10T14:25:55,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:55,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:55,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:55,247 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:55,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:55,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:55,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840815289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840815290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840815292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840815292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840815292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T14:25:55,399 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:55,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-10T14:25:55,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:55,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:55,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:55,400 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
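Context for the repeating pid=48 entries above: the master keeps dispatching a FlushRegionCallable to db1d50717577,46699, the region server answers with java.io.IOException "Unable to complete flush" because the region reports itself as already flushing, and the master logs "Remote procedure failed, pid=48" and re-dispatches until the in-flight flush completes. The fragment below is only an illustrative sketch of the public client call that exercises this same server-side path (an Admin-initiated table flush); it is not part of TestAcidGuarantees, and the connection settings are assumed.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative only: assumes an hbase-site.xml on the classpath pointing at the cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask the master to flush the table's regions; the master runs a flush procedure
      // and dispatches FlushRegionCallable to the hosting region servers, which is the
      // server-side sequence visible in the log records above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```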
2024-12-10T14:25:55,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:55,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:55,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840815492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840815494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840815495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840815495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840815495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,552 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:55,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-10T14:25:55,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:55,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:55,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:55,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
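The Mutate calls rejected above (callId 46 and 48-52 on port 46699) are the client side of the same condition: each put is answered with RegionTooBusyException "Over memstore limit=512.0 K" and retried, which is why the callIds keep climbing per connection. The stock HBase client performs this retry internally; the sketch below only makes the backoff pattern explicit for a single put against the row, column family A and qualifier col10 seen in the log. The value, retry count, and backoff figures are illustrative assumptions, not the test's code.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          // While the region's memstore is over its blocking limit, the failure is a
          // RegionTooBusyException (possibly wrapped by the client); back off and retry.
          if (attempt == 5) {
            throw e;
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```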
2024-12-10T14:25:55,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:55,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:55,603 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:55,608 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412102d6086cd95cd4961842c5eb04035c8c5_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102d6086cd95cd4961842c5eb04035c8c5_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:55,609 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/9ce358d1ce61434699158006db928cff, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:55,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/9ce358d1ce61434699158006db928cff is 175, key is test_row_0/A:col10/1733840755168/Put/seqid=0 2024-12-10T14:25:55,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741968_1144 (size=39549) 2024-12-10T14:25:55,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T14:25:55,705 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:55,705 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-10T14:25:55,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:55,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:55,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
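The 512.0 K figure in the RegionTooBusyException messages is the region's blocking memstore size, i.e. the configured flush threshold multiplied by the block multiplier checked in HRegion.checkResources; once MemStoreFlusher.0 finishes writing the MOB file and the store file under .tmp/A (as in the records above), writes are accepted again. A minimal sketch of the two configuration keys involved follows; the concrete values are assumptions chosen only so their product matches the 512 K limit in this log, not the test's actual settings.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold (hypothetical test-sized value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // Multiplier applied to the flush size to get the blocking limit (4 is the default).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 K * 4 = 524288 bytes, i.e. the 512.0 K limit reported in the log.
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
  }
}
```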
2024-12-10T14:25:55,706 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:55,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:55,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:55,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840815797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840815797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840815798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840815799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:55,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840815800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:55,858 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:55,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-10T14:25:55,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:55,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:55,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:55,859 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:55,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:55,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,012 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:56,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-10T14:25:56,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:56,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:56,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:56,013 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,016 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=98, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/9ce358d1ce61434699158006db928cff 2024-12-10T14:25:56,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/6dc60c15968741c79b532f86d22cd02b is 50, key is test_row_0/B:col10/1733840755168/Put/seqid=0 2024-12-10T14:25:56,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741969_1145 (size=12001) 2024-12-10T14:25:56,165 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:56,165 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-10T14:25:56,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:56,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
as already flushing 2024-12-10T14:25:56,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:56,166 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T14:25:56,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:56,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840816304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:56,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:56,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840816305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:56,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:56,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840816306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:56,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:56,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840816307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:56,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:56,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840816308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:56,318 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:56,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-10T14:25:56,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:56,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:56,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:56,319 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,434 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=98 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/6dc60c15968741c79b532f86d22cd02b 2024-12-10T14:25:56,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/9f0e4ecb86594f12aa60a3a6cf342889 is 50, key is test_row_0/C:col10/1733840755168/Put/seqid=0 2024-12-10T14:25:56,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741970_1146 (size=12001) 2024-12-10T14:25:56,474 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:56,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-10T14:25:56,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:56,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
as already flushing 2024-12-10T14:25:56,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:56,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,627 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:56,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-10T14:25:56,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:56,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:56,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:56,628 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,780 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:56,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-10T14:25:56,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:56,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:56,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:56,781 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:56,851 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=98 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/9f0e4ecb86594f12aa60a3a6cf342889 2024-12-10T14:25:56,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/9ce358d1ce61434699158006db928cff as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/9ce358d1ce61434699158006db928cff 2024-12-10T14:25:56,861 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/9ce358d1ce61434699158006db928cff, entries=200, sequenceid=98, filesize=38.6 K 2024-12-10T14:25:56,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/6dc60c15968741c79b532f86d22cd02b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/6dc60c15968741c79b532f86d22cd02b 2024-12-10T14:25:56,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/6dc60c15968741c79b532f86d22cd02b, entries=150, sequenceid=98, 
filesize=11.7 K 2024-12-10T14:25:56,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/9f0e4ecb86594f12aa60a3a6cf342889 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/9f0e4ecb86594f12aa60a3a6cf342889 2024-12-10T14:25:56,872 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/9f0e4ecb86594f12aa60a3a6cf342889, entries=150, sequenceid=98, filesize=11.7 K 2024-12-10T14:25:56,873 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 18673fce5a633353d821462d51dbbd4b in 1704ms, sequenceid=98, compaction requested=true 2024-12-10T14:25:56,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:56,873 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:25:56,873 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:56,873 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:25:56,873 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:56,873 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:56,873 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:25:56,873 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:56,873 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:56,875 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:56,875 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/A is initiating minor compaction (all files) 2024-12-10T14:25:56,875 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/A in 
TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:56,875 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/3c16834fe657460abef1ba5c594124da, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/2ffe952d943a4b9d9d95b002a7b7b657, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/9ce358d1ce61434699158006db928cff] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=99.2 K 2024-12-10T14:25:56,875 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:56,875 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/3c16834fe657460abef1ba5c594124da, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/2ffe952d943a4b9d9d95b002a7b7b657, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/9ce358d1ce61434699158006db928cff] 2024-12-10T14:25:56,876 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:56,876 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/B is initiating minor compaction (all files) 2024-12-10T14:25:56,876 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/B in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:25:56,876 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c16834fe657460abef1ba5c594124da, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733840753078 2024-12-10T14:25:56,876 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/9155bc4f449c4abc82ce418e38ac070f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/5344d2d423a943eb96499057ece0b868, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/6dc60c15968741c79b532f86d22cd02b] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=35.3 K 2024-12-10T14:25:56,876 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ffe952d943a4b9d9d95b002a7b7b657, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733840753417 2024-12-10T14:25:56,876 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 9155bc4f449c4abc82ce418e38ac070f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733840753078 2024-12-10T14:25:56,877 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ce358d1ce61434699158006db928cff, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733840754552 2024-12-10T14:25:56,877 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 5344d2d423a943eb96499057ece0b868, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733840753417 2024-12-10T14:25:56,877 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 6dc60c15968741c79b532f86d22cd02b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733840754552 2024-12-10T14:25:56,885 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:56,886 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#B#compaction#123 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:56,887 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412106df67e6b59764bb9a4021cd5c1ed228d_18673fce5a633353d821462d51dbbd4b store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:56,887 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/345376465be349be96bc11299aa1b7a1 is 50, key is test_row_0/B:col10/1733840755168/Put/seqid=0 2024-12-10T14:25:56,889 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412106df67e6b59764bb9a4021cd5c1ed228d_18673fce5a633353d821462d51dbbd4b, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:56,889 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412106df67e6b59764bb9a4021cd5c1ed228d_18673fce5a633353d821462d51dbbd4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:56,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741972_1148 (size=4469) 2024-12-10T14:25:56,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741971_1147 (size=12207) 2024-12-10T14:25:56,933 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:56,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-10T14:25:56,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:25:56,934 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-10T14:25:56,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:25:56,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:56,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:25:56,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:56,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:25:56,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:56,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121068db3a1e4b994065ac04f1321c5b0029_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840755188/Put/seqid=0 2024-12-10T14:25:56,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741973_1149 (size=12154) 2024-12-10T14:25:57,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T14:25:57,303 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#A#compaction#124 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:57,304 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/81d3dcdaa8164ff7a72587e42c23654d is 175, key is test_row_0/A:col10/1733840755168/Put/seqid=0 2024-12-10T14:25:57,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741974_1150 (size=31161) 2024-12-10T14:25:57,311 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/345376465be349be96bc11299aa1b7a1 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/345376465be349be96bc11299aa1b7a1 2024-12-10T14:25:57,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:57,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:57,317 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/B of 18673fce5a633353d821462d51dbbd4b into 345376465be349be96bc11299aa1b7a1(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:25:57,317 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:57,317 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/B, priority=13, startTime=1733840756873; duration=0sec 2024-12-10T14:25:57,317 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:57,317 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:B 2024-12-10T14:25:57,318 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:57,319 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:57,319 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/C is initiating minor compaction (all files) 2024-12-10T14:25:57,319 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/C in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:57,319 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/6fb78580c4794ad5a1c85cf9b026ca70, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/e29e56c717c1471696a385b9fb99c272, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/9f0e4ecb86594f12aa60a3a6cf342889] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=35.3 K 2024-12-10T14:25:57,320 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fb78580c4794ad5a1c85cf9b026ca70, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733840753078 2024-12-10T14:25:57,320 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting e29e56c717c1471696a385b9fb99c272, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733840753417 2024-12-10T14:25:57,321 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f0e4ecb86594f12aa60a3a6cf342889, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733840754552 2024-12-10T14:25:57,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840817324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840817324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840817325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840817325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,329 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#C#compaction#126 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:57,330 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/355d75d27aed4df0a15a1549eb5a5e2c is 50, key is test_row_0/C:col10/1733840755168/Put/seqid=0 2024-12-10T14:25:57,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840817328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741975_1151 (size=12207) 2024-12-10T14:25:57,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:57,348 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/355d75d27aed4df0a15a1549eb5a5e2c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/355d75d27aed4df0a15a1549eb5a5e2c 2024-12-10T14:25:57,356 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121068db3a1e4b994065ac04f1321c5b0029_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121068db3a1e4b994065ac04f1321c5b0029_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:57,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/d8e94630bfbc41a4b20b40d104141d58, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:57,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/d8e94630bfbc41a4b20b40d104141d58 is 175, key is test_row_0/A:col10/1733840755188/Put/seqid=0 2024-12-10T14:25:57,360 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/C of 18673fce5a633353d821462d51dbbd4b into 355d75d27aed4df0a15a1549eb5a5e2c(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:25:57,360 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:57,360 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/C, priority=13, startTime=1733840756873; duration=0sec 2024-12-10T14:25:57,361 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:57,361 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:C 2024-12-10T14:25:57,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741976_1152 (size=30955) 2024-12-10T14:25:57,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840817426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840817429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840817429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840817429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840817432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840817630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840817631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840817632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840817633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840817638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,716 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/81d3dcdaa8164ff7a72587e42c23654d as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/81d3dcdaa8164ff7a72587e42c23654d 2024-12-10T14:25:57,723 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/A of 18673fce5a633353d821462d51dbbd4b into 81d3dcdaa8164ff7a72587e42c23654d(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:25:57,723 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:57,723 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/A, priority=13, startTime=1733840756873; duration=0sec 2024-12-10T14:25:57,723 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:57,723 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:A 2024-12-10T14:25:57,767 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/d8e94630bfbc41a4b20b40d104141d58 2024-12-10T14:25:57,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/6f46bfe626744f9a87ab4b3e7106a326 is 50, key is test_row_0/B:col10/1733840755188/Put/seqid=0 2024-12-10T14:25:57,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741977_1153 (size=12001) 2024-12-10T14:25:57,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840817933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840817935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840817937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:57,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840817937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:57,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840817941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,187 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/6f46bfe626744f9a87ab4b3e7106a326 2024-12-10T14:25:58,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/05338e4ed37b40c4ae95aeae566c6552 is 50, key is test_row_0/C:col10/1733840755188/Put/seqid=0 2024-12-10T14:25:58,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741978_1154 (size=12001) 2024-12-10T14:25:58,204 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/05338e4ed37b40c4ae95aeae566c6552 2024-12-10T14:25:58,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/d8e94630bfbc41a4b20b40d104141d58 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/d8e94630bfbc41a4b20b40d104141d58 2024-12-10T14:25:58,218 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/d8e94630bfbc41a4b20b40d104141d58, entries=150, sequenceid=118, filesize=30.2 K 2024-12-10T14:25:58,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/6f46bfe626744f9a87ab4b3e7106a326 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/6f46bfe626744f9a87ab4b3e7106a326 2024-12-10T14:25:58,223 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/6f46bfe626744f9a87ab4b3e7106a326, entries=150, sequenceid=118, filesize=11.7 K 2024-12-10T14:25:58,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/05338e4ed37b40c4ae95aeae566c6552 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/05338e4ed37b40c4ae95aeae566c6552 2024-12-10T14:25:58,229 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/05338e4ed37b40c4ae95aeae566c6552, entries=150, sequenceid=118, filesize=11.7 K 2024-12-10T14:25:58,230 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 18673fce5a633353d821462d51dbbd4b in 1296ms, sequenceid=118, compaction requested=false 2024-12-10T14:25:58,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:58,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
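Editor's note: the RegionTooBusyException entries above come from HRegion.checkResources rejecting writes while the region's memstore is over its blocking limit; the flush that finishes above (dataSize ~114.05 KB) is what eventually clears the condition. The 512.0 K figure is not a setting of its own but the memstore flush size multiplied by the block multiplier. A minimal sketch of that relationship, assuming the standard configuration keys and an illustrative test-sized flush size (the actual test configuration is not visible in this log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BlockingMemStoreLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed values for illustration only: a 128 KB flush size with the usual
        // multiplier of 4 would yield the 512.0 K blocking limit reported in the log.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        long blockingLimit = flushSize * multiplier;
        // Puts are rejected with RegionTooBusyException while the memstore stays above
        // blockingLimit; a completed flush brings it back under and writes resume.
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
    }
}
```

Whether the test sets these keys directly or derives the limit some other way is not shown in this log.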
2024-12-10T14:25:58,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-10T14:25:58,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-10T14:25:58,233 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-12-10T14:25:58,233 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1370 sec 2024-12-10T14:25:58,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 3.1420 sec 2024-12-10T14:25:58,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:58,439 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-10T14:25:58,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:25:58,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:58,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:25:58,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:58,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:25:58,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:58,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108f0aa1cd986a4df2a15822f6f5c378bf_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840757323/Put/seqid=0 2024-12-10T14:25:58,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741979_1155 (size=12304) 2024-12-10T14:25:58,457 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:58,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840818454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840818455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840818455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840818457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840818457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,463 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108f0aa1cd986a4df2a15822f6f5c378bf_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108f0aa1cd986a4df2a15822f6f5c378bf_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:58,464 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/54f02e6b3c4642a58591303f667e8da0, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:58,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/54f02e6b3c4642a58591303f667e8da0 is 175, key is test_row_0/A:col10/1733840757323/Put/seqid=0 2024-12-10T14:25:58,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741980_1156 (size=31105) 2024-12-10T14:25:58,472 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=139, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/54f02e6b3c4642a58591303f667e8da0 2024-12-10T14:25:58,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/e97bd19a261447c69b73b0a8b97aff93 is 50, key is test_row_0/B:col10/1733840757323/Put/seqid=0 2024-12-10T14:25:58,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741981_1157 (size=12151) 2024-12-10T14:25:58,487 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=139 (bloomFilter=true), 
to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/e97bd19a261447c69b73b0a8b97aff93 2024-12-10T14:25:58,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/fb088ab4181c4d8fb87e2cca94406787 is 50, key is test_row_0/C:col10/1733840757323/Put/seqid=0 2024-12-10T14:25:58,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741982_1158 (size=12151) 2024-12-10T14:25:58,510 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/fb088ab4181c4d8fb87e2cca94406787 2024-12-10T14:25:58,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/54f02e6b3c4642a58591303f667e8da0 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/54f02e6b3c4642a58591303f667e8da0 2024-12-10T14:25:58,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/54f02e6b3c4642a58591303f667e8da0, entries=150, sequenceid=139, filesize=30.4 K 2024-12-10T14:25:58,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/e97bd19a261447c69b73b0a8b97aff93 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e97bd19a261447c69b73b0a8b97aff93 2024-12-10T14:25:58,531 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e97bd19a261447c69b73b0a8b97aff93, entries=150, sequenceid=139, filesize=11.9 K 2024-12-10T14:25:58,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/fb088ab4181c4d8fb87e2cca94406787 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/fb088ab4181c4d8fb87e2cca94406787 2024-12-10T14:25:58,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/fb088ab4181c4d8fb87e2cca94406787, entries=150, sequenceid=139, filesize=11.9 K 2024-12-10T14:25:58,540 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 18673fce5a633353d821462d51dbbd4b in 102ms, sequenceid=139, compaction requested=true 2024-12-10T14:25:58,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:58,540 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:58,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:25:58,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:58,541 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:58,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:25:58,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:58,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:25:58,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:58,542 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93221 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:58,542 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/A is initiating minor compaction (all files) 2024-12-10T14:25:58,542 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/A in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
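Editor's note: the ExploringCompactionPolicy lines above ("selected 3 files of size 93221 ... with 1 in ratio") refer to the ratio test a candidate set must pass: no file may be larger than the combined size of the other files times the compaction ratio. A small stand-alone sketch of that check, assuming the default hbase.hstore.compaction.ratio of 1.2 and illustrative byte counts roughly matching the 30.4 K / 30.2 K / 30.4 K A-store files being compacted here:

```java
public class InRatioCheck {
    // A candidate set is "in ratio" when every file is no larger than the sum of the
    // other files' sizes multiplied by the compaction ratio.
    static boolean filesInRatio(long[] sizes, double ratio) {
        long total = 0;
        for (long s : sizes) {
            total += s;
        }
        for (long s : sizes) {
            if (s > (total - s) * ratio) {
                return false;    // one file dwarfs the rest: drop this candidate set
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Illustrative sizes only; the log gives one exact size (31105) and two rounded ones.
        long[] storeA = {31105L, 30950L, 31150L};
        System.out.println(filesInRatio(storeA, 1.2)); // comparable sizes -> true
    }
}
```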
2024-12-10T14:25:58,542 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:58,542 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/81d3dcdaa8164ff7a72587e42c23654d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/d8e94630bfbc41a4b20b40d104141d58, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/54f02e6b3c4642a58591303f667e8da0] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=91.0 K 2024-12-10T14:25:58,542 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/B is initiating minor compaction (all files) 2024-12-10T14:25:58,542 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:58,542 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/B in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:58,542 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/81d3dcdaa8164ff7a72587e42c23654d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/d8e94630bfbc41a4b20b40d104141d58, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/54f02e6b3c4642a58591303f667e8da0] 2024-12-10T14:25:58,542 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/345376465be349be96bc11299aa1b7a1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/6f46bfe626744f9a87ab4b3e7106a326, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e97bd19a261447c69b73b0a8b97aff93] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=35.5 K 2024-12-10T14:25:58,543 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81d3dcdaa8164ff7a72587e42c23654d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733840754552 2024-12-10T14:25:58,543 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 345376465be349be96bc11299aa1b7a1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733840754552 2024-12-10T14:25:58,543 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8e94630bfbc41a4b20b40d104141d58, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733840755186 2024-12-10T14:25:58,544 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f46bfe626744f9a87ab4b3e7106a326, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733840755186 2024-12-10T14:25:58,544 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 54f02e6b3c4642a58591303f667e8da0, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1733840757323 2024-12-10T14:25:58,545 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting e97bd19a261447c69b73b0a8b97aff93, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1733840757323 2024-12-10T14:25:58,557 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:58,559 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#B#compaction#133 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:58,560 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210c0b44c6329e049669d92bdc95bbb730b_18673fce5a633353d821462d51dbbd4b store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:58,560 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/e76f11b57ded4593be90c9a0b7a37f54 is 50, key is test_row_0/B:col10/1733840757323/Put/seqid=0 2024-12-10T14:25:58,562 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210c0b44c6329e049669d92bdc95bbb730b_18673fce5a633353d821462d51dbbd4b, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:58,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:58,562 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210c0b44c6329e049669d92bdc95bbb730b_18673fce5a633353d821462d51dbbd4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:58,563 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-10T14:25:58,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:25:58,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:58,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:25:58,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:58,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:25:58,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:58,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840818577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840818577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840818578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840818581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840818581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741983_1159 (size=4469) 2024-12-10T14:25:58,599 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#A#compaction#132 average throughput is 0.58 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:58,600 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/9f27fd110777476eb3ea9491700138f2 is 175, key is test_row_0/A:col10/1733840757323/Put/seqid=0 2024-12-10T14:25:58,603 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108abe9fc376cd4285b194b87d04a18db4_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840758455/Put/seqid=0 2024-12-10T14:25:58,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741985_1161 (size=31413) 2024-12-10T14:25:58,634 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/9f27fd110777476eb3ea9491700138f2 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/9f27fd110777476eb3ea9491700138f2 2024-12-10T14:25:58,643 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/A of 18673fce5a633353d821462d51dbbd4b into 9f27fd110777476eb3ea9491700138f2(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
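Editor's note: the "Aborting writer ... because there are no MOB cells" message above reflects how the MOB-aware flusher and compactor route cells: only values larger than the column family's MOB threshold go to the MOB file, everything else stays in the regular store file. With the ~50 byte values in this test (the "Len of the biggest cell ... is 50" lines), the MOB writer ends up empty and is discarded. A minimal sketch of that routing decision; the threshold is passed in as a parameter because the family's actual setting is not shown in the log:

```java
import org.apache.hadoop.hbase.Cell;

public class MobRouting {
    // Only cells whose value exceeds the family's MOB threshold are written to the MOB
    // file; smaller cells (like the ~50 byte values here) go to the normal store file,
    // which is why the MOB writer above had nothing to commit.
    static boolean goesToMobFile(Cell cell, long mobThreshold) {
        return cell.getValueLength() > mobThreshold;
    }
}
```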
2024-12-10T14:25:58,643 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:58,643 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/A, priority=13, startTime=1733840758540; duration=0sec 2024-12-10T14:25:58,644 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:58,644 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:A 2024-12-10T14:25:58,644 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:58,647 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:58,647 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/C is initiating minor compaction (all files) 2024-12-10T14:25:58,647 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/C in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:58,647 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/355d75d27aed4df0a15a1549eb5a5e2c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/05338e4ed37b40c4ae95aeae566c6552, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/fb088ab4181c4d8fb87e2cca94406787] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=35.5 K 2024-12-10T14:25:58,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741984_1160 (size=12459) 2024-12-10T14:25:58,648 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 355d75d27aed4df0a15a1549eb5a5e2c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733840754552 2024-12-10T14:25:58,648 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05338e4ed37b40c4ae95aeae566c6552, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733840755186 2024-12-10T14:25:58,649 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb088ab4181c4d8fb87e2cca94406787, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=139, earliestPutTs=1733840757323 2024-12-10T14:25:58,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741986_1162 (size=12304) 2024-12-10T14:25:58,653 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:58,660 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108abe9fc376cd4285b194b87d04a18db4_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108abe9fc376cd4285b194b87d04a18db4_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:58,663 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/23587deb52f441299c81816952ef5c1d, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:58,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/23587deb52f441299c81816952ef5c1d is 175, key is test_row_0/A:col10/1733840758455/Put/seqid=0 2024-12-10T14:25:58,664 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/e76f11b57ded4593be90c9a0b7a37f54 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e76f11b57ded4593be90c9a0b7a37f54 2024-12-10T14:25:58,670 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#C#compaction#135 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:58,671 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/da817e77110a4b1493f4ba405e56a453 is 50, key is test_row_0/C:col10/1733840757323/Put/seqid=0 2024-12-10T14:25:58,675 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/B of 18673fce5a633353d821462d51dbbd4b into e76f11b57ded4593be90c9a0b7a37f54(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:25:58,675 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:58,675 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/B, priority=13, startTime=1733840758541; duration=0sec 2024-12-10T14:25:58,675 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:58,675 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:B 2024-12-10T14:25:58,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840818683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840818683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840818683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840818685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840818686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741987_1163 (size=31105) 2024-12-10T14:25:58,699 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/23587deb52f441299c81816952ef5c1d 2024-12-10T14:25:58,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741988_1164 (size=12459) 2024-12-10T14:25:58,710 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/b2e7c7536b2f439897886ff73c5164d4 is 50, key is test_row_0/B:col10/1733840758455/Put/seqid=0 2024-12-10T14:25:58,717 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/da817e77110a4b1493f4ba405e56a453 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/da817e77110a4b1493f4ba405e56a453 2024-12-10T14:25:58,726 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/C of 18673fce5a633353d821462d51dbbd4b into da817e77110a4b1493f4ba405e56a453(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:25:58,726 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:58,726 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/C, priority=13, startTime=1733840758541; duration=0sec 2024-12-10T14:25:58,726 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:58,726 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:C 2024-12-10T14:25:58,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741989_1165 (size=12151) 2024-12-10T14:25:58,733 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/b2e7c7536b2f439897886ff73c5164d4 2024-12-10T14:25:58,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/690c2e2297bf4b65a85957e1b95dde09 is 50, key is test_row_0/C:col10/1733840758455/Put/seqid=0 2024-12-10T14:25:58,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741990_1166 (size=12151) 2024-12-10T14:25:58,756 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/690c2e2297bf4b65a85957e1b95dde09 2024-12-10T14:25:58,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/23587deb52f441299c81816952ef5c1d as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/23587deb52f441299c81816952ef5c1d 2024-12-10T14:25:58,767 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/23587deb52f441299c81816952ef5c1d, entries=150, sequenceid=158, filesize=30.4 K 2024-12-10T14:25:58,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/b2e7c7536b2f439897886ff73c5164d4 as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/b2e7c7536b2f439897886ff73c5164d4 2024-12-10T14:25:58,773 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/b2e7c7536b2f439897886ff73c5164d4, entries=150, sequenceid=158, filesize=11.9 K 2024-12-10T14:25:58,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/690c2e2297bf4b65a85957e1b95dde09 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/690c2e2297bf4b65a85957e1b95dde09 2024-12-10T14:25:58,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/690c2e2297bf4b65a85957e1b95dde09, entries=150, sequenceid=158, filesize=11.9 K 2024-12-10T14:25:58,780 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 18673fce5a633353d821462d51dbbd4b in 218ms, sequenceid=158, compaction requested=false 2024-12-10T14:25:58,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:58,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:58,887 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-10T14:25:58,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:25:58,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:58,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:25:58,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:58,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:25:58,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:58,896 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412107fb25ca611984037a7aa8cdb2c9f2ccd_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840758886/Put/seqid=0 2024-12-10T14:25:58,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840818902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840818904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840818905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840818906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741991_1167 (size=14794) 2024-12-10T14:25:58,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:58,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840818907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:58,911 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:58,916 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412107fb25ca611984037a7aa8cdb2c9f2ccd_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412107fb25ca611984037a7aa8cdb2c9f2ccd_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:58,917 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/01487750a17040f3a2423e7d4e33f54c, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:58,918 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/01487750a17040f3a2423e7d4e33f54c is 175, key is test_row_0/A:col10/1733840758886/Put/seqid=0 2024-12-10T14:25:58,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741992_1168 (size=39749) 2024-12-10T14:25:58,939 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=181, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/01487750a17040f3a2423e7d4e33f54c 2024-12-10T14:25:58,951 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/d49aafad3a714d769a6ccdce7342eddf is 50, key is test_row_0/B:col10/1733840758886/Put/seqid=0 2024-12-10T14:25:58,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741993_1169 
(size=12151) 2024-12-10T14:25:58,966 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/d49aafad3a714d769a6ccdce7342eddf 2024-12-10T14:25:58,975 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/8a405f8c3e3541ccbd34bd12c556931f is 50, key is test_row_0/C:col10/1733840758886/Put/seqid=0 2024-12-10T14:25:58,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741994_1170 (size=12151) 2024-12-10T14:25:59,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840819007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840819008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840819009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840819008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840819032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T14:25:59,198 INFO [Thread-619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-12-10T14:25:59,200 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:25:59,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-10T14:25:59,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T14:25:59,201 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:25:59,202 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:25:59,202 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:25:59,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840819211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840819234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840819234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840819234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840819235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T14:25:59,354 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:59,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-10T14:25:59,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:59,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:59,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:59,355 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:59,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:59,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:59,386 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/8a405f8c3e3541ccbd34bd12c556931f 2024-12-10T14:25:59,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/01487750a17040f3a2423e7d4e33f54c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/01487750a17040f3a2423e7d4e33f54c 2024-12-10T14:25:59,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/01487750a17040f3a2423e7d4e33f54c, entries=200, sequenceid=181, filesize=38.8 K 2024-12-10T14:25:59,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/d49aafad3a714d769a6ccdce7342eddf as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/d49aafad3a714d769a6ccdce7342eddf 2024-12-10T14:25:59,406 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/d49aafad3a714d769a6ccdce7342eddf, entries=150, 
sequenceid=181, filesize=11.9 K 2024-12-10T14:25:59,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/8a405f8c3e3541ccbd34bd12c556931f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/8a405f8c3e3541ccbd34bd12c556931f 2024-12-10T14:25:59,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/8a405f8c3e3541ccbd34bd12c556931f, entries=150, sequenceid=181, filesize=11.9 K 2024-12-10T14:25:59,413 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 18673fce5a633353d821462d51dbbd4b in 526ms, sequenceid=181, compaction requested=true 2024-12-10T14:25:59,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:59,413 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:59,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:25:59,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:59,415 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102267 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:59,415 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:59,415 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/A is initiating minor compaction (all files) 2024-12-10T14:25:59,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:25:59,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:59,415 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/A in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:25:59,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:25:59,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:59,415 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/9f27fd110777476eb3ea9491700138f2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/23587deb52f441299c81816952ef5c1d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/01487750a17040f3a2423e7d4e33f54c] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=99.9 K 2024-12-10T14:25:59,415 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:59,415 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/9f27fd110777476eb3ea9491700138f2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/23587deb52f441299c81816952ef5c1d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/01487750a17040f3a2423e7d4e33f54c] 2024-12-10T14:25:59,416 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f27fd110777476eb3ea9491700138f2, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1733840757323 2024-12-10T14:25:59,416 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23587deb52f441299c81816952ef5c1d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733840758455 2024-12-10T14:25:59,417 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:59,417 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01487750a17040f3a2423e7d4e33f54c, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1733840758575 2024-12-10T14:25:59,417 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/B is initiating minor compaction (all files) 2024-12-10T14:25:59,417 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/B in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:25:59,418 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e76f11b57ded4593be90c9a0b7a37f54, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/b2e7c7536b2f439897886ff73c5164d4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/d49aafad3a714d769a6ccdce7342eddf] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=35.9 K 2024-12-10T14:25:59,418 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting e76f11b57ded4593be90c9a0b7a37f54, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1733840757323 2024-12-10T14:25:59,419 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting b2e7c7536b2f439897886ff73c5164d4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733840758455 2024-12-10T14:25:59,419 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting d49aafad3a714d769a6ccdce7342eddf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1733840758579 2024-12-10T14:25:59,426 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:59,429 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#B#compaction#142 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:59,429 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412100e04c4c4f0b64b1497f1913bfb9c6259_18673fce5a633353d821462d51dbbd4b store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:59,429 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/146748eb451441208095649b0b88ede0 is 50, key is test_row_0/B:col10/1733840758886/Put/seqid=0 2024-12-10T14:25:59,432 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412100e04c4c4f0b64b1497f1913bfb9c6259_18673fce5a633353d821462d51dbbd4b, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:59,432 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412100e04c4c4f0b64b1497f1913bfb9c6259_18673fce5a633353d821462d51dbbd4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:59,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741996_1172 (size=4469) 2024-12-10T14:25:59,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741995_1171 (size=12561) 2024-12-10T14:25:59,454 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#A#compaction#141 average throughput is 0.90 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:59,455 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/2020bbdf9e8e4874bb8b0deb66cf4cf6 is 175, key is test_row_0/A:col10/1733840758886/Put/seqid=0 2024-12-10T14:25:59,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741997_1173 (size=31515) 2024-12-10T14:25:59,474 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/2020bbdf9e8e4874bb8b0deb66cf4cf6 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/2020bbdf9e8e4874bb8b0deb66cf4cf6 2024-12-10T14:25:59,479 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/A of 18673fce5a633353d821462d51dbbd4b into 2020bbdf9e8e4874bb8b0deb66cf4cf6(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:25:59,479 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:59,479 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/A, priority=13, startTime=1733840759413; duration=0sec 2024-12-10T14:25:59,479 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:25:59,479 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:A 2024-12-10T14:25:59,479 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:25:59,481 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:25:59,481 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/C is initiating minor compaction (all files) 2024-12-10T14:25:59,482 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/C in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:25:59,482 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/da817e77110a4b1493f4ba405e56a453, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/690c2e2297bf4b65a85957e1b95dde09, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/8a405f8c3e3541ccbd34bd12c556931f] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=35.9 K 2024-12-10T14:25:59,482 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting da817e77110a4b1493f4ba405e56a453, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1733840757323 2024-12-10T14:25:59,483 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 690c2e2297bf4b65a85957e1b95dde09, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733840758455 2024-12-10T14:25:59,483 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a405f8c3e3541ccbd34bd12c556931f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1733840758579 2024-12-10T14:25:59,494 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#C#compaction#143 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:25:59,495 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/66ded0a0026849c98fa957305a022b1d is 50, key is test_row_0/C:col10/1733840758886/Put/seqid=0 2024-12-10T14:25:59,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T14:25:59,507 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:59,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-10T14:25:59,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:25:59,507 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-10T14:25:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:25:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:25:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:25:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:59,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741998_1174 (size=12561) 2024-12-10T14:25:59,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:59,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:59,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210945e1e4d8fff4d02896e77e39ac15226_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840758893/Put/seqid=0 2024-12-10T14:25:59,527 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/66ded0a0026849c98fa957305a022b1d as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/66ded0a0026849c98fa957305a022b1d 2024-12-10T14:25:59,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741999_1175 (size=12304) 2024-12-10T14:25:59,534 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/C of 18673fce5a633353d821462d51dbbd4b into 66ded0a0026849c98fa957305a022b1d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
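
The compaction activity above (SortedCompactionPolicy reporting "3 eligible, 16 blocking", ExploringCompactionPolicy selecting all three files, and HStore completing the minor compactions for stores A and C) is driven by the store-file limits configured on the region server. The following is only a minimal sketch of the standard knobs involved, written as a hypothetical Java helper; the property names are the usual HBase keys, but the values shown are the common defaults and are not read from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        // Returns a Configuration carrying the compaction-selection knobs referenced above.
        public static Configuration tunedConf() {
            Configuration conf = HBaseConfiguration.create();
            // Minimum and maximum number of store files a minor compaction may select.
            conf.setInt("hbase.hstore.compaction.min", 3);
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Store-file count at which further flushes are blocked ("16 blocking" in the log).
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            // Size ratio used by ExploringCompactionPolicy when weighing candidate file sets.
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
            return conf;
        }
    }

With values like these, three eligible store files are enough to make a store a compaction candidate, which is why each flush in this test is followed almost immediately by a "Small Compaction requested" entry.
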
2024-12-10T14:25:59,535 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:59,535 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/C, priority=13, startTime=1733840759415; duration=0sec 2024-12-10T14:25:59,535 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:59,535 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:C 2024-12-10T14:25:59,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:59,545 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210945e1e4d8fff4d02896e77e39ac15226_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210945e1e4d8fff4d02896e77e39ac15226_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:59,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/982b9a627c644292999927898d459434, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:59,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/982b9a627c644292999927898d459434 is 175, key is test_row_0/A:col10/1733840758893/Put/seqid=0 2024-12-10T14:25:59,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840819549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742000_1176 (size=31105) 2024-12-10T14:25:59,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840819551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840819552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840819554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,561 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=199, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/982b9a627c644292999927898d459434 2024-12-10T14:25:59,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840819553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/80e1158b18294667b6bbab06a71e0502 is 50, key is test_row_0/B:col10/1733840758893/Put/seqid=0 2024-12-10T14:25:59,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742001_1177 (size=12151) 2024-12-10T14:25:59,585 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/80e1158b18294667b6bbab06a71e0502 2024-12-10T14:25:59,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/762addc4abb6485e999c0f913060db5c is 50, key is test_row_0/C:col10/1733840758893/Put/seqid=0 2024-12-10T14:25:59,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742002_1178 (size=12151) 2024-12-10T14:25:59,606 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/762addc4abb6485e999c0f913060db5c 2024-12-10T14:25:59,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/982b9a627c644292999927898d459434 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/982b9a627c644292999927898d459434 2024-12-10T14:25:59,617 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/982b9a627c644292999927898d459434, entries=150, sequenceid=199, filesize=30.4 K 2024-12-10T14:25:59,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/80e1158b18294667b6bbab06a71e0502 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/80e1158b18294667b6bbab06a71e0502 2024-12-10T14:25:59,623 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/80e1158b18294667b6bbab06a71e0502, entries=150, sequenceid=199, filesize=11.9 K 2024-12-10T14:25:59,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/762addc4abb6485e999c0f913060db5c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/762addc4abb6485e999c0f913060db5c 2024-12-10T14:25:59,630 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/762addc4abb6485e999c0f913060db5c, entries=150, sequenceid=199, filesize=11.9 K 2024-12-10T14:25:59,631 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 18673fce5a633353d821462d51dbbd4b in 124ms, sequenceid=199, compaction requested=false 2024-12-10T14:25:59,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:59,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:59,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-10T14:25:59,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-10T14:25:59,635 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-10T14:25:59,635 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 431 msec 2024-12-10T14:25:59,637 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 436 msec 2024-12-10T14:25:59,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:59,660 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-10T14:25:59,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:25:59,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:59,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:25:59,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:59,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:25:59,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:25:59,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108c99c3b1d6a2494fa5b71c11bd1d5b4c_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840759551/Put/seqid=0 
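
The FlushTableProcedure bookkeeping above (pid=50 finishing as a subprocedure of pid=49, then pid=51/52 being queued) is the master-side half of a client-requested flush of TestAcidGuarantees. Below is a minimal, illustrative sketch of issuing that kind of flush through the standard Admin API; it assumes only an hbase-site.xml on the classpath and takes no settings from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Ask the master to flush every region of the table; server side this shows up
                // as a FlushTableProcedure with one FlushRegionProcedure per region, as in the log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

The later "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed" entry is what the client-side future logs once such a request finishes.
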
2024-12-10T14:25:59,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840819673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840819677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840819679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840819680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742003_1179 (size=12304) 2024-12-10T14:25:59,685 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:25:59,690 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108c99c3b1d6a2494fa5b71c11bd1d5b4c_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108c99c3b1d6a2494fa5b71c11bd1d5b4c_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:25:59,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,692 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/83760d295ee24efab581fc423959e7e3, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:25:59,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840819688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/83760d295ee24efab581fc423959e7e3 is 175, key is test_row_0/A:col10/1733840759551/Put/seqid=0 2024-12-10T14:25:59,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742004_1180 (size=31105) 2024-12-10T14:25:59,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840819784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840819781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840819784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840819784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840819793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T14:25:59,804 INFO [Thread-619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-10T14:25:59,805 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:25:59,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-10T14:25:59,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T14:25:59,806 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:25:59,807 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:25:59,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:25:59,860 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/146748eb451441208095649b0b88ede0 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/146748eb451441208095649b0b88ede0 
2024-12-10T14:25:59,866 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/B of 18673fce5a633353d821462d51dbbd4b into 146748eb451441208095649b0b88ede0(size=12.3 K), total size for store is 24.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:25:59,866 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:25:59,866 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/B, priority=13, startTime=1733840759415; duration=0sec 2024-12-10T14:25:59,866 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:25:59,866 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:B 2024-12-10T14:25:59,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T14:25:59,959 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:25:59,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T14:25:59,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:59,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:25:59,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:25:59,960 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:59,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:25:59,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840819988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840819988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840819988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840819988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:25:59,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:25:59,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840819997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:00,103 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=220, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/83760d295ee24efab581fc423959e7e3 2024-12-10T14:26:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T14:26:00,112 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:00,112 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/5463973440a147cc931b572f82d81ad3 is 50, key is test_row_0/B:col10/1733840759551/Put/seqid=0 2024-12-10T14:26:00,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T14:26:00,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:00,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:00,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:00,112 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:00,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:00,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:00,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742005_1181 (size=12151) 2024-12-10T14:26:00,265 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:00,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T14:26:00,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:00,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:00,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:00,266 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:00,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:00,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:00,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:00,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840820291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:00,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:00,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840820292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:00,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:00,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840820292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:00,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:00,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840820292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:00,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:00,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840820298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:00,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T14:26:00,417 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:00,417 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T14:26:00,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:00,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:00,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:00,418 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:00,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:00,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:00,517 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/5463973440a147cc931b572f82d81ad3 2024-12-10T14:26:00,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/7634b2eaae2e444fa2672976361b8645 is 50, key is test_row_0/C:col10/1733840759551/Put/seqid=0 2024-12-10T14:26:00,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742006_1182 (size=12151) 2024-12-10T14:26:00,530 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/7634b2eaae2e444fa2672976361b8645 2024-12-10T14:26:00,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/83760d295ee24efab581fc423959e7e3 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/83760d295ee24efab581fc423959e7e3 2024-12-10T14:26:00,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/83760d295ee24efab581fc423959e7e3, entries=150, sequenceid=220, filesize=30.4 K 2024-12-10T14:26:00,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/5463973440a147cc931b572f82d81ad3 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/5463973440a147cc931b572f82d81ad3 2024-12-10T14:26:00,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/5463973440a147cc931b572f82d81ad3, entries=150, sequenceid=220, filesize=11.9 K 2024-12-10T14:26:00,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/7634b2eaae2e444fa2672976361b8645 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/7634b2eaae2e444fa2672976361b8645 2024-12-10T14:26:00,550 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/7634b2eaae2e444fa2672976361b8645, entries=150, sequenceid=220, filesize=11.9 K 2024-12-10T14:26:00,552 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 18673fce5a633353d821462d51dbbd4b in 892ms, sequenceid=220, compaction requested=true 2024-12-10T14:26:00,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:00,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:26:00,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:00,553 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:00,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:26:00,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:00,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:26:00,553 DEBUG 
[RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:00,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:00,554 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93725 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:00,554 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:00,554 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/B is initiating minor compaction (all files) 2024-12-10T14:26:00,554 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/A is initiating minor compaction (all files) 2024-12-10T14:26:00,554 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/B in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:00,554 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/A in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:00,554 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/146748eb451441208095649b0b88ede0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/80e1158b18294667b6bbab06a71e0502, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/5463973440a147cc931b572f82d81ad3] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=36.0 K 2024-12-10T14:26:00,554 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/2020bbdf9e8e4874bb8b0deb66cf4cf6, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/982b9a627c644292999927898d459434, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/83760d295ee24efab581fc423959e7e3] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=91.5 K 2024-12-10T14:26:00,554 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:00,554 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/2020bbdf9e8e4874bb8b0deb66cf4cf6, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/982b9a627c644292999927898d459434, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/83760d295ee24efab581fc423959e7e3] 2024-12-10T14:26:00,554 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 146748eb451441208095649b0b88ede0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1733840758579 2024-12-10T14:26:00,555 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2020bbdf9e8e4874bb8b0deb66cf4cf6, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1733840758579 2024-12-10T14:26:00,555 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 80e1158b18294667b6bbab06a71e0502, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733840758893 2024-12-10T14:26:00,555 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 982b9a627c644292999927898d459434, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733840758893 2024-12-10T14:26:00,555 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 5463973440a147cc931b572f82d81ad3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1733840759548 2024-12-10T14:26:00,555 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83760d295ee24efab581fc423959e7e3, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1733840759548 2024-12-10T14:26:00,563 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:00,564 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#B#compaction#150 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:00,565 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/dbb1303755634816a28660c30f6380cd is 50, key is test_row_0/B:col10/1733840759551/Put/seqid=0 2024-12-10T14:26:00,567 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210aff18bf4dc2f4797acde05af45aa219e_18673fce5a633353d821462d51dbbd4b store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:00,569 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210aff18bf4dc2f4797acde05af45aa219e_18673fce5a633353d821462d51dbbd4b, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:00,569 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210aff18bf4dc2f4797acde05af45aa219e_18673fce5a633353d821462d51dbbd4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:00,570 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:00,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T14:26:00,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:26:00,570 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-10T14:26:00,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:26:00,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:00,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:26:00,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:00,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:26:00,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:00,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742007_1183 (size=12663) 2024-12-10T14:26:00,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742008_1184 (size=4469) 2024-12-10T14:26:00,580 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#A#compaction#151 average throughput is 1.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:00,581 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/fa993a83c5754d36bc8afe664a60ff0d is 175, key is test_row_0/A:col10/1733840759551/Put/seqid=0 2024-12-10T14:26:00,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210c40ff8ec329848139de47889c755f0c0_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840759677/Put/seqid=0 2024-12-10T14:26:00,583 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/dbb1303755634816a28660c30f6380cd as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dbb1303755634816a28660c30f6380cd 2024-12-10T14:26:00,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742010_1186 (size=12304) 2024-12-10T14:26:00,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:00,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742009_1185 (size=31617) 2024-12-10T14:26:00,589 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/B of 18673fce5a633353d821462d51dbbd4b into dbb1303755634816a28660c30f6380cd(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:26:00,589 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:00,589 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/B, priority=13, startTime=1733840760553; duration=0sec 2024-12-10T14:26:00,589 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:00,589 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:B 2024-12-10T14:26:00,590 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:00,591 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:00,591 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/C is initiating minor compaction (all files) 2024-12-10T14:26:00,591 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/C in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:00,591 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/66ded0a0026849c98fa957305a022b1d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/762addc4abb6485e999c0f913060db5c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/7634b2eaae2e444fa2672976361b8645] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=36.0 K 2024-12-10T14:26:00,592 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 66ded0a0026849c98fa957305a022b1d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1733840758579 2024-12-10T14:26:00,593 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 762addc4abb6485e999c0f913060db5c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733840758893 2024-12-10T14:26:00,593 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210c40ff8ec329848139de47889c755f0c0_18673fce5a633353d821462d51dbbd4b to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c40ff8ec329848139de47889c755f0c0_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:00,593 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 7634b2eaae2e444fa2672976361b8645, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1733840759548 2024-12-10T14:26:00,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/24a1be64548c46719a4c9822aa62e535, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:00,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/24a1be64548c46719a4c9822aa62e535 is 175, key is test_row_0/A:col10/1733840759677/Put/seqid=0 2024-12-10T14:26:00,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742011_1187 (size=31105) 2024-12-10T14:26:00,607 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=238, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/24a1be64548c46719a4c9822aa62e535 2024-12-10T14:26:00,608 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#C#compaction#153 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:00,609 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/cc411183c9a64116b1aa9b0809a30ee6 is 50, key is test_row_0/C:col10/1733840759551/Put/seqid=0 2024-12-10T14:26:00,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/ba3b7ec1876a4098998381d3154198cc is 50, key is test_row_0/B:col10/1733840759677/Put/seqid=0 2024-12-10T14:26:00,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742012_1188 (size=12663) 2024-12-10T14:26:00,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742013_1189 (size=12151) 2024-12-10T14:26:00,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:00,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:00,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:00,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840820808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:00,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:00,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840820812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:00,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:00,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840820811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:00,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:00,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840820814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:00,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:00,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840820814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:00,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T14:26:00,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:00,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:00,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840820915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:00,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840820915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:00,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:00,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840820915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:00,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:00,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840820916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:00,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:00,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840820917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:00,993 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/fa993a83c5754d36bc8afe664a60ff0d as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/fa993a83c5754d36bc8afe664a60ff0d 2024-12-10T14:26:00,999 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/A of 18673fce5a633353d821462d51dbbd4b into fa993a83c5754d36bc8afe664a60ff0d(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:26:00,999 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:00,999 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/A, priority=13, startTime=1733840760553; duration=0sec 2024-12-10T14:26:01,000 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:01,000 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:A 2024-12-10T14:26:01,025 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/ba3b7ec1876a4098998381d3154198cc 2024-12-10T14:26:01,025 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/cc411183c9a64116b1aa9b0809a30ee6 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/cc411183c9a64116b1aa9b0809a30ee6 2024-12-10T14:26:01,033 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/C of 18673fce5a633353d821462d51dbbd4b into cc411183c9a64116b1aa9b0809a30ee6(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:26:01,033 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:01,033 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/C, priority=13, startTime=1733840760553; duration=0sec 2024-12-10T14:26:01,035 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:01,035 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:C 2024-12-10T14:26:01,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/8099ba84afdd4104a85f0243df59bd4f is 50, key is test_row_0/C:col10/1733840759677/Put/seqid=0 2024-12-10T14:26:01,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742014_1190 (size=12151) 2024-12-10T14:26:01,043 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/8099ba84afdd4104a85f0243df59bd4f 2024-12-10T14:26:01,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/24a1be64548c46719a4c9822aa62e535 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/24a1be64548c46719a4c9822aa62e535 2024-12-10T14:26:01,052 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/24a1be64548c46719a4c9822aa62e535, entries=150, sequenceid=238, filesize=30.4 K 2024-12-10T14:26:01,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/ba3b7ec1876a4098998381d3154198cc as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/ba3b7ec1876a4098998381d3154198cc 2024-12-10T14:26:01,057 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/ba3b7ec1876a4098998381d3154198cc, entries=150, sequenceid=238, filesize=11.9 K 2024-12-10T14:26:01,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/8099ba84afdd4104a85f0243df59bd4f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/8099ba84afdd4104a85f0243df59bd4f 2024-12-10T14:26:01,064 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/8099ba84afdd4104a85f0243df59bd4f, entries=150, sequenceid=238, filesize=11.9 K 2024-12-10T14:26:01,065 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 18673fce5a633353d821462d51dbbd4b in 495ms, sequenceid=238, compaction requested=false 2024-12-10T14:26:01,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:01,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:26:01,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-10T14:26:01,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-10T14:26:01,068 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-10T14:26:01,068 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2600 sec 2024-12-10T14:26:01,069 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.2630 sec 2024-12-10T14:26:01,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:01,118 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-10T14:26:01,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:26:01,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:01,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:26:01,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:01,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:26:01,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:01,127 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412102142d5bf92f64355b0bd9bfcd3b43455_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840760810/Put/seqid=0 2024-12-10T14:26:01,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840821128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840821127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840821130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840821130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840821131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742015_1191 (size=14944) 2024-12-10T14:26:01,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840821233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840821233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840821234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840821234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840821234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840821435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840821435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840821436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840821436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840821437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,561 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:01,565 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412102142d5bf92f64355b0bd9bfcd3b43455_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102142d5bf92f64355b0bd9bfcd3b43455_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:01,567 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/b8e7fcf751b7490c9578a91dd358d608, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:01,567 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/b8e7fcf751b7490c9578a91dd358d608 is 175, key is test_row_0/A:col10/1733840760810/Put/seqid=0 2024-12-10T14:26:01,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742016_1192 (size=39899) 2024-12-10T14:26:01,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840821739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840821739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840821740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840821740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:01,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840821742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:01,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T14:26:01,910 INFO [Thread-619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-10T14:26:01,912 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:26:01,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-10T14:26:01,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T14:26:01,914 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:26:01,914 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:26:01,914 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:26:01,973 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=262, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/b8e7fcf751b7490c9578a91dd358d608 2024-12-10T14:26:01,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/e794136822754eca91cc778d469e8822 is 50, key is test_row_0/B:col10/1733840760810/Put/seqid=0 2024-12-10T14:26:01,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742017_1193 (size=12251) 
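[Editor's note] The RegionTooBusyException records above show client puts being rejected because the region's memstore is over its blocking limit (512.0 K in this run) while a flush is still in progress. The sketch below is a minimal, illustrative example of how a writer could back off and retry on that condition; it is not taken from the test code, the retry count and backoff values are assumptions, and the row, family, and qualifier names are copied from the log only for flavor.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100L;                 // assumed starting backoff
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        table.put(put);                // rejected while the memstore is over its blocking limit
                        break;
                    } catch (IOException e) {
                        // A RegionTooBusyException ("Over memstore limit=512.0 K ...") may surface here,
                        // possibly wrapped by the client's own retry machinery; back off and try again.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;                // exponential backoff before the next attempt
                    }
                }
            }
        }
    }

In practice the HBase client already retries retriable exceptions internally, so explicit handling like this only matters when those internal retries are tuned down or have been exhausted.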
2024-12-10T14:26:02,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T14:26:02,066 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:02,067 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-10T14:26:02,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:02,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:02,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:02,067 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:02,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:02,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:02,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T14:26:02,219 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:02,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-10T14:26:02,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:26:02,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:02,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:02,220 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:02,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
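[Editor's note] The records above show the other side of the same contention: the master has queued FlushTableProcedure pid=53 with a per-region FlushRegionProcedure subprocedure (pid=54), but each time the remote FlushRegionCallable reaches the region server it finds the region "already flushing", fails with the IOException seen here, and is re-dispatched by the master (the repeated pid=54 dispatches above and below) while the MemStoreFlusher's own flush is still running. For reference, a table-level flush like the one the test client issued ("Client=jenkins ... flush TestAcidGuarantees") can be requested through the Admin API roughly as in the hedged sketch below; the table name is taken from the log, everything else is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table; in this log that
                // request shows up as FlushTableProcedure pid=53 and its per-region pid=54,
                // and the call returns once the procedure reports completion.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

The repeated "Executing remote procedure ... pid=54" / "Unable to complete flush" pairs are therefore expected noise while the flush that was already running drains; they should stop once that flush commits its store files (the "Finished flush" record further below).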
2024-12-10T14:26:02,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:02,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:02,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840822243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:02,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:02,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840822244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:02,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:02,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840822245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:02,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:02,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840822246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:02,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:02,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840822248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:02,372 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:02,373 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-10T14:26:02,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:02,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:02,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:02,373 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:02,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:02,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:02,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/e794136822754eca91cc778d469e8822 2024-12-10T14:26:02,402 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/1b78a1cf0a5845ac916a037ddfb29df2 is 50, key is test_row_0/C:col10/1733840760810/Put/seqid=0 2024-12-10T14:26:02,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742018_1194 (size=12251) 2024-12-10T14:26:02,408 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/1b78a1cf0a5845ac916a037ddfb29df2 2024-12-10T14:26:02,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/b8e7fcf751b7490c9578a91dd358d608 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b8e7fcf751b7490c9578a91dd358d608 2024-12-10T14:26:02,417 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b8e7fcf751b7490c9578a91dd358d608, entries=200, sequenceid=262, filesize=39.0 K 2024-12-10T14:26:02,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/e794136822754eca91cc778d469e8822 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e794136822754eca91cc778d469e8822 2024-12-10T14:26:02,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e794136822754eca91cc778d469e8822, entries=150, sequenceid=262, filesize=12.0 K 2024-12-10T14:26:02,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/1b78a1cf0a5845ac916a037ddfb29df2 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/1b78a1cf0a5845ac916a037ddfb29df2 2024-12-10T14:26:02,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/1b78a1cf0a5845ac916a037ddfb29df2, entries=150, sequenceid=262, filesize=12.0 K 2024-12-10T14:26:02,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 18673fce5a633353d821462d51dbbd4b in 1311ms, sequenceid=262, compaction requested=true 2024-12-10T14:26:02,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:02,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:26:02,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:02,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:26:02,429 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:02,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:02,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:26:02,429 DEBUG 
[RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:02,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:02,431 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102621 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:02,431 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:02,431 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/A is initiating minor compaction (all files) 2024-12-10T14:26:02,431 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/B is initiating minor compaction (all files) 2024-12-10T14:26:02,431 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/A in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:02,431 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/B in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:02,431 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dbb1303755634816a28660c30f6380cd, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/ba3b7ec1876a4098998381d3154198cc, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e794136822754eca91cc778d469e8822] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=36.2 K 2024-12-10T14:26:02,431 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/fa993a83c5754d36bc8afe664a60ff0d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/24a1be64548c46719a4c9822aa62e535, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b8e7fcf751b7490c9578a91dd358d608] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=100.2 K 2024-12-10T14:26:02,431 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:02,431 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/fa993a83c5754d36bc8afe664a60ff0d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/24a1be64548c46719a4c9822aa62e535, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b8e7fcf751b7490c9578a91dd358d608] 2024-12-10T14:26:02,432 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting dbb1303755634816a28660c30f6380cd, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1733840759548 2024-12-10T14:26:02,432 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa993a83c5754d36bc8afe664a60ff0d, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1733840759548 2024-12-10T14:26:02,432 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting ba3b7ec1876a4098998381d3154198cc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733840759676 2024-12-10T14:26:02,432 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24a1be64548c46719a4c9822aa62e535, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733840759676 2024-12-10T14:26:02,433 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting e794136822754eca91cc778d469e8822, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733840760810 2024-12-10T14:26:02,433 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8e7fcf751b7490c9578a91dd358d608, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733840760810 2024-12-10T14:26:02,441 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:02,444 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#B#compaction#160 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:02,445 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/10b8b37702bf43a99691f43950596527 is 50, key is test_row_0/B:col10/1733840760810/Put/seqid=0 2024-12-10T14:26:02,445 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412109575d9c299e54ee6bde94c6373a3011c_18673fce5a633353d821462d51dbbd4b store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:02,449 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412109575d9c299e54ee6bde94c6373a3011c_18673fce5a633353d821462d51dbbd4b, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:02,450 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412109575d9c299e54ee6bde94c6373a3011c_18673fce5a633353d821462d51dbbd4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:02,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742019_1195 (size=12865) 2024-12-10T14:26:02,464 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/10b8b37702bf43a99691f43950596527 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/10b8b37702bf43a99691f43950596527 2024-12-10T14:26:02,471 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/B of 18673fce5a633353d821462d51dbbd4b into 10b8b37702bf43a99691f43950596527(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
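[Editorial aside] The entries above show the region server selecting and committing a minor compaction of the B store for region 18673fce5a633353d821462d51dbbd4b right after the flush, with ExploringCompactionPolicy choosing the three eligible files. For readers following the log, below is a minimal, purely illustrative sketch of how a compaction of the same table could also be requested explicitly through the public Admin API; only the table name is taken from the log, and the configuration source and error handling are assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Request a minor compaction for every region/store of the table.
            // The server-side policy (ExploringCompactionPolicy in this log) still
            // decides which store files are actually merged.
            admin.compact(table);
            // admin.majorCompact(table) would request a major compaction instead.
        }
    }
}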
2024-12-10T14:26:02,471 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:02,471 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/B, priority=13, startTime=1733840762429; duration=0sec 2024-12-10T14:26:02,471 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:02,471 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:B 2024-12-10T14:26:02,471 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:02,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742020_1196 (size=4469) 2024-12-10T14:26:02,473 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:02,473 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/C is initiating minor compaction (all files) 2024-12-10T14:26:02,473 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/C in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:26:02,473 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/cc411183c9a64116b1aa9b0809a30ee6, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/8099ba84afdd4104a85f0243df59bd4f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/1b78a1cf0a5845ac916a037ddfb29df2] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=36.2 K 2024-12-10T14:26:02,475 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting cc411183c9a64116b1aa9b0809a30ee6, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1733840759548 2024-12-10T14:26:02,475 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 8099ba84afdd4104a85f0243df59bd4f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733840759676 2024-12-10T14:26:02,476 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b78a1cf0a5845ac916a037ddfb29df2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733840760810 2024-12-10T14:26:02,486 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#C#compaction#161 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:02,487 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/961b667a0da44340ba0fc889af46505e is 50, key is test_row_0/C:col10/1733840760810/Put/seqid=0 2024-12-10T14:26:02,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742021_1197 (size=12865) 2024-12-10T14:26:02,498 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/961b667a0da44340ba0fc889af46505e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/961b667a0da44340ba0fc889af46505e 2024-12-10T14:26:02,505 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/C of 18673fce5a633353d821462d51dbbd4b into 961b667a0da44340ba0fc889af46505e(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
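[Editorial aside] The lines that follow show the master-driven flush procedure (pid=53 with subprocedure pid=54) re-dispatching FlushRegionCallable to the region server, which then flushes all three column families of the region. A comparable table flush can be requested from client code; the short sketch below is illustrative only, assuming the same table name and a reachable cluster configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Flush all memstores of the table; on the server side this surfaces as a
            // FlushTableProcedure with per-region FlushRegionCallable work, as in the log above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}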
2024-12-10T14:26:02,505 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:02,505 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/C, priority=13, startTime=1733840762429; duration=0sec 2024-12-10T14:26:02,505 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:02,505 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:C 2024-12-10T14:26:02,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T14:26:02,525 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:02,525 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-10T14:26:02,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:02,526 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-10T14:26:02,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:26:02,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:02,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:26:02,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:02,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:26:02,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:02,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101f459b15b4924529ba7f54091bdbd106_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840761130/Put/seqid=0 2024-12-10T14:26:02,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742022_1198 (size=12454) 2024-12-10T14:26:02,671 INFO [master/db1d50717577:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-10T14:26:02,671 INFO [master/db1d50717577:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-10T14:26:02,874 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#A#compaction#159 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:02,875 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/1b954cf6f6984d7aaa28977a60d4720a is 175, key is test_row_0/A:col10/1733840760810/Put/seqid=0 2024-12-10T14:26:02,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742023_1199 (size=31819) 2024-12-10T14:26:02,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:02,949 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101f459b15b4924529ba7f54091bdbd106_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101f459b15b4924529ba7f54091bdbd106_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:02,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/de51589484034130bf6ea748da1737cf, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:02,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/de51589484034130bf6ea748da1737cf is 175, key is test_row_0/A:col10/1733840761130/Put/seqid=0 2024-12-10T14:26:02,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:41075 is added to blk_1073742024_1200 (size=31255) 2024-12-10T14:26:02,958 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=279, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/de51589484034130bf6ea748da1737cf 2024-12-10T14:26:02,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/dab6624a318a4e16ab450beef69a21fd is 50, key is test_row_0/B:col10/1733840761130/Put/seqid=0 2024-12-10T14:26:02,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742025_1201 (size=12301) 2024-12-10T14:26:02,970 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/dab6624a318a4e16ab450beef69a21fd 2024-12-10T14:26:02,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/19344c7bfb8b4636a96437f0e38daec3 is 50, key is test_row_0/C:col10/1733840761130/Put/seqid=0 2024-12-10T14:26:02,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742026_1202 (size=12301) 2024-12-10T14:26:03,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T14:26:03,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:03,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:03,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840823265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840823266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840823266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840823267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840823267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,285 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/1b954cf6f6984d7aaa28977a60d4720a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1b954cf6f6984d7aaa28977a60d4720a 2024-12-10T14:26:03,290 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/A of 18673fce5a633353d821462d51dbbd4b into 1b954cf6f6984d7aaa28977a60d4720a(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
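[Editorial aside] The repeated RegionTooBusyException / "Over memstore limit=512.0 K" warnings around this point are the region server pushing back on writers while the memstore drains through flushes and compactions. The sketch below shows one hypothetical way a caller could back off and retry a Put when that exception surfaces; the table, row, and column names mirror the test data in the log, the backoff numbers are arbitrary, and in practice the stock HBase client already applies its own retry/backoff policy, so this is only meant to make the pattern explicit.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                  // arbitrary starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);                // may throw RegionTooBusyException while the memstore is over its limit
                    break;                         // write accepted
                } catch (RegionTooBusyException busy) {
                    if (attempt == 5) throw busy;  // give up after the last attempt
                    Thread.sleep(backoffMs);       // give flushes/compactions time to catch up
                    backoffMs *= 2;                // exponential backoff
                }
            }
        }
    }
}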
2024-12-10T14:26:03,290 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:03,290 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/A, priority=13, startTime=1733840762429; duration=0sec 2024-12-10T14:26:03,290 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:03,290 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:A 2024-12-10T14:26:03,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840823368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840823369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840823372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840823372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840823372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,382 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/19344c7bfb8b4636a96437f0e38daec3 2024-12-10T14:26:03,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/de51589484034130bf6ea748da1737cf as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/de51589484034130bf6ea748da1737cf 2024-12-10T14:26:03,392 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/de51589484034130bf6ea748da1737cf, entries=150, sequenceid=279, filesize=30.5 K 2024-12-10T14:26:03,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/dab6624a318a4e16ab450beef69a21fd as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dab6624a318a4e16ab450beef69a21fd 2024-12-10T14:26:03,397 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dab6624a318a4e16ab450beef69a21fd, entries=150, sequenceid=279, filesize=12.0 K 2024-12-10T14:26:03,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/19344c7bfb8b4636a96437f0e38daec3 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/19344c7bfb8b4636a96437f0e38daec3 2024-12-10T14:26:03,403 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/19344c7bfb8b4636a96437f0e38daec3, entries=150, sequenceid=279, filesize=12.0 K 2024-12-10T14:26:03,404 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 18673fce5a633353d821462d51dbbd4b in 878ms, sequenceid=279, compaction requested=false 2024-12-10T14:26:03,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:03,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:03,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-10T14:26:03,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-10T14:26:03,407 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-10T14:26:03,407 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4910 sec 2024-12-10T14:26:03,408 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.4950 sec 2024-12-10T14:26:03,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:03,574 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T14:26:03,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:26:03,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:03,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:26:03,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:03,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:26:03,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:03,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840823582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840823582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840823582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840823582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840823583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412109f0404be05f94d018bd35ae9394ba3c9_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840763572/Put/seqid=0 2024-12-10T14:26:03,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742027_1203 (size=17534) 2024-12-10T14:26:03,598 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:03,603 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412109f0404be05f94d018bd35ae9394ba3c9_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412109f0404be05f94d018bd35ae9394ba3c9_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:03,604 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/cb1ee0f725624bd2b09fe052f23bf141, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:03,604 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/cb1ee0f725624bd2b09fe052f23bf141 is 175, key is test_row_0/A:col10/1733840763572/Put/seqid=0 2024-12-10T14:26:03,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742028_1204 (size=48639) 2024-12-10T14:26:03,610 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=304, memsize=47.0 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/cb1ee0f725624bd2b09fe052f23bf141 2024-12-10T14:26:03,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/a145d8ba2cb8410a9020d50f94dd82dc is 50, key is test_row_0/B:col10/1733840763572/Put/seqid=0 2024-12-10T14:26:03,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742029_1205 (size=12301) 2024-12-10T14:26:03,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840823687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840823687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840823687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840823687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840823687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840823889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840823889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840823890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840823890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:03,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:03,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840823890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:04,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T14:26:04,020 INFO [Thread-619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-10T14:26:04,021 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:26:04,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-12-10T14:26:04,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T14:26:04,023 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:26:04,023 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:26:04,024 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:26:04,025 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/a145d8ba2cb8410a9020d50f94dd82dc 2024-12-10T14:26:04,033 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/e631c73780bb4263a7d496596871fdf0 is 50, key is test_row_0/C:col10/1733840763572/Put/seqid=0 2024-12-10T14:26:04,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742030_1206 (size=12301) 
2024-12-10T14:26:04,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T14:26:04,174 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:04,174 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-10T14:26:04,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:04,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:04,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:04,175 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:04,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:04,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:04,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:04,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840824193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:04,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:04,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:04,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840824193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:04,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840824194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:04,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:04,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840824194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:04,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:04,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840824194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:04,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T14:26:04,327 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:04,327 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-10T14:26:04,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:04,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:04,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:04,328 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:04,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:04,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:04,438 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/e631c73780bb4263a7d496596871fdf0 2024-12-10T14:26:04,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/cb1ee0f725624bd2b09fe052f23bf141 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/cb1ee0f725624bd2b09fe052f23bf141 2024-12-10T14:26:04,448 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/cb1ee0f725624bd2b09fe052f23bf141, entries=250, sequenceid=304, filesize=47.5 K 2024-12-10T14:26:04,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/a145d8ba2cb8410a9020d50f94dd82dc as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/a145d8ba2cb8410a9020d50f94dd82dc 2024-12-10T14:26:04,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/a145d8ba2cb8410a9020d50f94dd82dc, entries=150, 
sequenceid=304, filesize=12.0 K 2024-12-10T14:26:04,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/e631c73780bb4263a7d496596871fdf0 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/e631c73780bb4263a7d496596871fdf0 2024-12-10T14:26:04,459 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/e631c73780bb4263a7d496596871fdf0, entries=150, sequenceid=304, filesize=12.0 K 2024-12-10T14:26:04,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 18673fce5a633353d821462d51dbbd4b in 885ms, sequenceid=304, compaction requested=true 2024-12-10T14:26:04,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:04,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:26:04,460 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:04,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:04,460 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:04,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:26:04,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:04,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:26:04,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:04,461 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37467 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:04,461 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111713 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:04,461 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] 
regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/B is initiating minor compaction (all files) 2024-12-10T14:26:04,461 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/A is initiating minor compaction (all files) 2024-12-10T14:26:04,461 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/B in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:04,461 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/A in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:04,461 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1b954cf6f6984d7aaa28977a60d4720a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/de51589484034130bf6ea748da1737cf, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/cb1ee0f725624bd2b09fe052f23bf141] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=109.1 K 2024-12-10T14:26:04,461 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/10b8b37702bf43a99691f43950596527, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dab6624a318a4e16ab450beef69a21fd, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/a145d8ba2cb8410a9020d50f94dd82dc] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=36.6 K 2024-12-10T14:26:04,461 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:04,461 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1b954cf6f6984d7aaa28977a60d4720a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/de51589484034130bf6ea748da1737cf, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/cb1ee0f725624bd2b09fe052f23bf141] 2024-12-10T14:26:04,462 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b954cf6f6984d7aaa28977a60d4720a, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733840760810 2024-12-10T14:26:04,462 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 10b8b37702bf43a99691f43950596527, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733840760810 2024-12-10T14:26:04,463 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting de51589484034130bf6ea748da1737cf, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1733840761126 2024-12-10T14:26:04,463 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting dab6624a318a4e16ab450beef69a21fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1733840761126 2024-12-10T14:26:04,463 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb1ee0f725624bd2b09fe052f23bf141, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733840763259 2024-12-10T14:26:04,463 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting a145d8ba2cb8410a9020d50f94dd82dc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733840763266 2024-12-10T14:26:04,470 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:04,470 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#B#compaction#168 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:04,471 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/43361b7c33184ad7b1d062b3de94d601 is 50, key is test_row_0/B:col10/1733840763572/Put/seqid=0 2024-12-10T14:26:04,472 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210e33a4072f7a24eebbe94dd46455c306b_18673fce5a633353d821462d51dbbd4b store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:04,474 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210e33a4072f7a24eebbe94dd46455c306b_18673fce5a633353d821462d51dbbd4b, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:04,474 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210e33a4072f7a24eebbe94dd46455c306b_18673fce5a633353d821462d51dbbd4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:04,480 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:04,480 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-10T14:26:04,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
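For reference, the RS_FLUSH_REGIONS procedures being executed above (FlushRegionCallable, pid=56, dispatched by the master's FlushTableProcedure) correspond to a client-initiated table flush. A minimal sketch of that client call, assuming a standard HBase 2.x client and an hbase-site.xml on the classpath (the class name here is illustrative, the table name is the one in the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // The master turns this into a FlushTableProcedure with one
          // FlushRegionProcedure per region, as seen in the pid=55/56 entries;
          // the call returns once the procedure completes.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
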
2024-12-10T14:26:04,481 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T14:26:04,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:26:04,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:04,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:26:04,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:04,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:26:04,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:04,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742032_1208 (size=4469) 2024-12-10T14:26:04,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742031_1207 (size=13017) 2024-12-10T14:26:04,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210df48c3e303a943a9a75e3910a3683bd8_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840763580/Put/seqid=0 2024-12-10T14:26:04,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742033_1209 (size=12454) 2024-12-10T14:26:04,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:04,500 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210df48c3e303a943a9a75e3910a3683bd8_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210df48c3e303a943a9a75e3910a3683bd8_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:04,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/b100670681804c0e95632a29af50b418, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:04,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/b100670681804c0e95632a29af50b418 is 175, key is test_row_0/A:col10/1733840763580/Put/seqid=0 2024-12-10T14:26:04,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742034_1210 (size=31255) 2024-12-10T14:26:04,506 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=316, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/b100670681804c0e95632a29af50b418 2024-12-10T14:26:04,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/af67abc5bae74ead85b5b230924575f7 is 50, key is test_row_0/B:col10/1733840763580/Put/seqid=0 2024-12-10T14:26:04,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742035_1211 (size=12301) 2024-12-10T14:26:04,520 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/af67abc5bae74ead85b5b230924575f7 2024-12-10T14:26:04,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/fce3b8cd53a44c84a954fe2b66c3603a is 50, key is test_row_0/C:col10/1733840763580/Put/seqid=0 2024-12-10T14:26:04,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742036_1212 (size=12301) 2024-12-10T14:26:04,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T14:26:04,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:04,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
as already flushing 2024-12-10T14:26:04,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:04,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840824714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:04,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:04,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840824715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:04,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:04,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840824715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:04,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:04,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840824716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:04,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:04,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840824716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:04,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:04,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:04,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840824819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:04,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840824819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:04,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:04,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840824819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:04,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:04,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840824819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:04,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:04,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840824820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:04,886 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#A#compaction#169 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:04,887 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/89f52233ffa140f59734651e5dd36f3a is 175, key is test_row_0/A:col10/1733840763572/Put/seqid=0 2024-12-10T14:26:04,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742037_1213 (size=31971) 2024-12-10T14:26:04,894 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/43361b7c33184ad7b1d062b3de94d601 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/43361b7c33184ad7b1d062b3de94d601 2024-12-10T14:26:04,898 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/89f52233ffa140f59734651e5dd36f3a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/89f52233ffa140f59734651e5dd36f3a 2024-12-10T14:26:04,899 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/B of 18673fce5a633353d821462d51dbbd4b into 43361b7c33184ad7b1d062b3de94d601(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
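The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting writes while the region's memstore is over its blocking limit; the unusually small "limit=512.0 K" presumably reflects this test's deliberately tuned-down hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier rather than production defaults. The HBase client normally retries such rejections itself; a hand-rolled caller that wants its own backoff might look roughly like the following sketch (table, row, family and qualifier names mirror the test's key layout; retry constants are illustrative, and depending on client retry settings the exception may arrive wrapped rather than directly):

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      // Retries a single put a few times when the region reports it is over its memstore limit.
      static void putWithBackoff(Connection connection, Put put) throws Exception {
        try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          long backoffMs = 100;                  // illustrative starting backoff
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              return;
            } catch (RegionTooBusyException e) { // same exception class as in the log above
              Thread.sleep(backoffMs);           // give MemStoreFlusher time to drain the region
              backoffMs *= 2;
            }
          }
          throw new RuntimeException("region still too busy after retries");
        }
      }

      static Put exampleRow() {
        // Row/family/qualifier follow the test's layout (test_row_0, families A/B/C, col10).
        return new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      }
    }
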
2024-12-10T14:26:04,899 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:04,899 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/B, priority=13, startTime=1733840764460; duration=0sec 2024-12-10T14:26:04,899 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:04,899 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:B 2024-12-10T14:26:04,899 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:04,902 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37467 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:04,902 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/C is initiating minor compaction (all files) 2024-12-10T14:26:04,902 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/C in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:04,902 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/961b667a0da44340ba0fc889af46505e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/19344c7bfb8b4636a96437f0e38daec3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/e631c73780bb4263a7d496596871fdf0] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=36.6 K 2024-12-10T14:26:04,903 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 961b667a0da44340ba0fc889af46505e, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733840760810 2024-12-10T14:26:04,903 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 19344c7bfb8b4636a96437f0e38daec3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1733840761126 2024-12-10T14:26:04,903 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting e631c73780bb4263a7d496596871fdf0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733840763266 2024-12-10T14:26:04,904 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in 18673fce5a633353d821462d51dbbd4b/A of 18673fce5a633353d821462d51dbbd4b into 89f52233ffa140f59734651e5dd36f3a(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:26:04,904 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:04,904 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/A, priority=13, startTime=1733840764459; duration=0sec 2024-12-10T14:26:04,905 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:04,905 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:A 2024-12-10T14:26:04,910 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#C#compaction#173 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:04,910 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/618c96041a6b45a8a81c1bb45eb1581b is 50, key is test_row_0/C:col10/1733840763572/Put/seqid=0 2024-12-10T14:26:04,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742038_1214 (size=13017) 2024-12-10T14:26:04,932 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/fce3b8cd53a44c84a954fe2b66c3603a 2024-12-10T14:26:04,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/b100670681804c0e95632a29af50b418 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b100670681804c0e95632a29af50b418 2024-12-10T14:26:04,940 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b100670681804c0e95632a29af50b418, entries=150, sequenceid=316, filesize=30.5 K 2024-12-10T14:26:04,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/af67abc5bae74ead85b5b230924575f7 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/af67abc5bae74ead85b5b230924575f7 2024-12-10T14:26:04,945 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/af67abc5bae74ead85b5b230924575f7, entries=150, sequenceid=316, filesize=12.0 K 2024-12-10T14:26:04,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/fce3b8cd53a44c84a954fe2b66c3603a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/fce3b8cd53a44c84a954fe2b66c3603a 2024-12-10T14:26:04,950 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/fce3b8cd53a44c84a954fe2b66c3603a, entries=150, sequenceid=316, filesize=12.0 K 2024-12-10T14:26:04,951 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 18673fce5a633353d821462d51dbbd4b in 470ms, sequenceid=316, compaction requested=false 2024-12-10T14:26:04,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:04,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
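The compaction activity interleaved above (SortedCompactionPolicy "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", ExploringCompactionPolicy picking all three files, separate short/long compaction threads) is governed by a handful of region-server settings. A sketch using the standard configuration keys follows; the values shown are the usual defaults, which happen to match the numbers in the log, and this is illustrative rather than the test's actual hbase-site.xml:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      static Configuration compactionDefaults() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum/maximum number of store files considered per minor compaction
        // ("Selecting compaction from 3 store files ... 3 eligible" above).
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Ratio used by ExploringCompactionPolicy when scoring candidate file sets.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        // Writes to a store block once it accumulates this many files ("16 blocking" above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        // One "small" and one "large" compaction thread, matching the
        // -shortCompactions-0 / -longCompactions-0 thread names in the log.
        conf.setInt("hbase.regionserver.thread.compaction.small", 1);
        conf.setInt("hbase.regionserver.thread.compaction.large", 1);
        return conf;
      }
    }
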
2024-12-10T14:26:04,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-12-10T14:26:04,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-12-10T14:26:04,954 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-10T14:26:04,954 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 930 msec 2024-12-10T14:26:04,955 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 933 msec 2024-12-10T14:26:05,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:05,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T14:26:05,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:26:05,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:05,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:26:05,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:05,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:26:05,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:05,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840825026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840825029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840825029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840825030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840825030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210d6f7f7b204974020b8fe0183ef675a20_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840765023/Put/seqid=0 2024-12-10T14:26:05,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742039_1215 (size=12454) 2024-12-10T14:26:05,037 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,041 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210d6f7f7b204974020b8fe0183ef675a20_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d6f7f7b204974020b8fe0183ef675a20_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:05,042 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/1bf67c9762b64ebfbf106fa5430ac253, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:05,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/1bf67c9762b64ebfbf106fa5430ac253 is 175, key is test_row_0/A:col10/1733840765023/Put/seqid=0 2024-12-10T14:26:05,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742040_1216 (size=31255) 2024-12-10T14:26:05,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T14:26:05,126 INFO [Thread-619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-10T14:26:05,127 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:26:05,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-12-10T14:26:05,128 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:26:05,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-10T14:26:05,129 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:26:05,129 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:26:05,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840825132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840825132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840825133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840825133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840825134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-10T14:26:05,281 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:05,281 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-10T14:26:05,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:05,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:05,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:05,281 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:05,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:05,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:05,325 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/618c96041a6b45a8a81c1bb45eb1581b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/618c96041a6b45a8a81c1bb45eb1581b 2024-12-10T14:26:05,330 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/C of 18673fce5a633353d821462d51dbbd4b into 618c96041a6b45a8a81c1bb45eb1581b(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:26:05,330 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:05,330 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/C, priority=13, startTime=1733840764460; duration=0sec 2024-12-10T14:26:05,330 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:05,330 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:C 2024-12-10T14:26:05,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840825334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840825335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840825336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840825336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840825336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-10T14:26:05,433 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:05,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-10T14:26:05,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:05,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:05,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:05,434 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:05,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:05,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:05,449 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=346, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/1bf67c9762b64ebfbf106fa5430ac253 2024-12-10T14:26:05,457 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/b46bb416e8154d9e944de5503e083ba1 is 50, key is test_row_0/B:col10/1733840765023/Put/seqid=0 2024-12-10T14:26:05,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742041_1217 (size=12301) 2024-12-10T14:26:05,461 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/b46bb416e8154d9e944de5503e083ba1 2024-12-10T14:26:05,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/35e42408935e48519cf0e4215f1adf18 is 50, key is test_row_0/C:col10/1733840765023/Put/seqid=0 2024-12-10T14:26:05,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742042_1218 (size=12301) 2024-12-10T14:26:05,473 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/35e42408935e48519cf0e4215f1adf18 2024-12-10T14:26:05,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/1bf67c9762b64ebfbf106fa5430ac253 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1bf67c9762b64ebfbf106fa5430ac253 2024-12-10T14:26:05,482 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1bf67c9762b64ebfbf106fa5430ac253, entries=150, sequenceid=346, filesize=30.5 K 2024-12-10T14:26:05,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/b46bb416e8154d9e944de5503e083ba1 as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/b46bb416e8154d9e944de5503e083ba1 2024-12-10T14:26:05,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/b46bb416e8154d9e944de5503e083ba1, entries=150, sequenceid=346, filesize=12.0 K 2024-12-10T14:26:05,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/35e42408935e48519cf0e4215f1adf18 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/35e42408935e48519cf0e4215f1adf18 2024-12-10T14:26:05,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/35e42408935e48519cf0e4215f1adf18, entries=150, sequenceid=346, filesize=12.0 K 2024-12-10T14:26:05,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 18673fce5a633353d821462d51dbbd4b in 469ms, sequenceid=346, compaction requested=true 2024-12-10T14:26:05,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:05,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:26:05,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:05,492 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:05,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,492 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:05,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:26:05,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:05,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:26:05,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:05,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,494 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring 
compaction algorithm has selected 3 files of size 94481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:05,494 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/A is initiating minor compaction (all files) 2024-12-10T14:26:05,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,494 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/A in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:05,495 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/89f52233ffa140f59734651e5dd36f3a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b100670681804c0e95632a29af50b418, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1bf67c9762b64ebfbf106fa5430ac253] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=92.3 K 2024-12-10T14:26:05,495 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:05,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,495 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/89f52233ffa140f59734651e5dd36f3a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b100670681804c0e95632a29af50b418, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1bf67c9762b64ebfbf106fa5430ac253] 2024-12-10T14:26:05,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,495 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:05,495 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/B is initiating minor compaction (all files) 2024-12-10T14:26:05,495 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/B in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:05,495 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/43361b7c33184ad7b1d062b3de94d601, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/af67abc5bae74ead85b5b230924575f7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/b46bb416e8154d9e944de5503e083ba1] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=36.7 K 2024-12-10T14:26:05,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,496 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89f52233ffa140f59734651e5dd36f3a, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733840763266 2024-12-10T14:26:05,496 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 43361b7c33184ad7b1d062b3de94d601, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733840763266 2024-12-10T14:26:05,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,496 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b100670681804c0e95632a29af50b418, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733840763580 2024-12-10T14:26:05,496 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting af67abc5bae74ead85b5b230924575f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733840763580 2024-12-10T14:26:05,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,497 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting b46bb416e8154d9e944de5503e083ba1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733840765022 2024-12-10T14:26:05,497 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bf67c9762b64ebfbf106fa5430ac253, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733840765022 2024-12-10T14:26:05,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,507 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:05,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,508 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,513 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#B#compaction#178 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:05,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,513 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/06031c52223e46ea87203a1f8c9e2ceb is 50, key is test_row_0/B:col10/1733840765023/Put/seqid=0 2024-12-10T14:26:05,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,516 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,520 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,524 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,525 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210ca4b1cf392dd4dd296f2d20425d49eca_18673fce5a633353d821462d51dbbd4b store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:05,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,529 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210ca4b1cf392dd4dd296f2d20425d49eca_18673fce5a633353d821462d51dbbd4b, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:05,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,529 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ca4b1cf392dd4dd296f2d20425d49eca_18673fce5a633353d821462d51dbbd4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:05,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742043_1219 (size=13119) 2024-12-10T14:26:05,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742044_1220 (size=4469) 2024-12-10T14:26:05,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,557 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/06031c52223e46ea87203a1f8c9e2ceb as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/06031c52223e46ea87203a1f8c9e2ceb 2024-12-10T14:26:05,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,558 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#A#compaction#177 average throughput is 0.48 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:05,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,559 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/6502997d43684d4191f48c8121dc9650 is 175, key is test_row_0/A:col10/1733840765023/Put/seqid=0 2024-12-10T14:26:05,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,565 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,566 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/B of 18673fce5a633353d821462d51dbbd4b into 06031c52223e46ea87203a1f8c9e2ceb(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:26:05,566 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:05,566 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/B, priority=13, startTime=1733840765492; duration=0sec 2024-12-10T14:26:05,566 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:05,566 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:B 2024-12-10T14:26:05,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,566 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:05,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,568 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:05,568 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/C is initiating minor compaction (all files) 2024-12-10T14:26:05,568 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/C in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:26:05,568 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/618c96041a6b45a8a81c1bb45eb1581b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/fce3b8cd53a44c84a954fe2b66c3603a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/35e42408935e48519cf0e4215f1adf18] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=36.7 K 2024-12-10T14:26:05,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742045_1221 (size=32073) 2024-12-10T14:26:05,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,569 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 618c96041a6b45a8a81c1bb45eb1581b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1733840763266 2024-12-10T14:26:05,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,570 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting fce3b8cd53a44c84a954fe2b66c3603a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733840763580 2024-12-10T14:26:05,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,570 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 35e42408935e48519cf0e4215f1adf18, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=346, earliestPutTs=1733840765022 2024-12-10T14:26:05,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,575 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/6502997d43684d4191f48c8121dc9650 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/6502997d43684d4191f48c8121dc9650 2024-12-10T14:26:05,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,581 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#C#compaction#179 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:05,582 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/50e7c334eb994fc78e627d1c49b08930 is 50, key is test_row_0/C:col10/1733840765023/Put/seqid=0 2024-12-10T14:26:05,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,584 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/A of 
2024-12-10T14:26:05,584 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b:
2024-12-10T14:26:05,584 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/A, priority=13, startTime=1733840765492; duration=0sec
2024-12-10T14:26:05,584 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T14:26:05,584 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:A
2024-12-10T14:26:05,586 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757
2024-12-10T14:26:05,587 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58
2024-12-10T14:26:05,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.
2024-12-10T14:26:05,587 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB
2024-12-10T14:26:05,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A
2024-12-10T14:26:05,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T14:26:05,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B
2024-12-10T14:26:05,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T14:26:05,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C
2024-12-10T14:26:05,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
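(Editorial aside, not part of the captured test output: the CompactingMemStore / CompactionPipeline entries above indicate the flushed stores are backed by in-memory compaction rather than the default memstore. A minimal sketch of declaring such a column family follows; the table name, the family name reuse of "A", and the choice of the BASIC policy are illustrative assumptions.)

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class InMemoryCompactionSketch {
    // Builds a descriptor for a hypothetical table whose family "A" uses a
    // CompactingMemStore (BASIC in-memory compaction policy).
    public static TableDescriptor exampleTable() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("ExampleTable"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .build())
            .build();
    }
}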
2024-12-10T14:26:05,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742046_1222 (size=13119)
2024-12-10T14:26:05,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121009ab564882e44dab93ea9b671f24d99b_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840765025/Put/seqid=0
2024-12-10T14:26:05,612 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/50e7c334eb994fc78e627d1c49b08930 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/50e7c334eb994fc78e627d1c49b08930
2024-12-10T14:26:05,618 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/C of 18673fce5a633353d821462d51dbbd4b into 50e7c334eb994fc78e627d1c49b08930(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-10T14:26:05,618 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b:
2024-12-10T14:26:05,618 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/C, priority=13, startTime=1733840765493; duration=0sec
2024-12-10T14:26:05,618 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T14:26:05,618 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:C
2024-12-10T14:26:05,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742047_1223 (size=9914)
2024-12-10T14:26:05,628 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121009ab564882e44dab93ea9b671f24d99b_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121009ab564882e44dab93ea9b671f24d99b_18673fce5a633353d821462d51dbbd4b
2024-12-10T14:26:05,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/b54f9927fe5140288645cac1d7e3f6cb, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b]
2024-12-10T14:26:05,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/b54f9927fe5140288645cac1d7e3f6cb is 175, key is test_row_0/A:col10/1733840765025/Put/seqid=0
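(Editorial aside, not part of the captured test output: the HMobStore / DefaultMobStoreFlusher entries above show the flush writing a MOB file under mobdir before committing the regular store file. A minimal sketch of a MOB-enabled column family follows; the 100 KB threshold and the class name are illustrative assumptions, not values used by this test.)

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
    // A hypothetical MOB-enabled family "A": cells whose value exceeds the threshold
    // (in bytes) are written to separate MOB files instead of the regular HFiles.
    public static ColumnFamilyDescriptor mobFamily() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(100L * 1024) // 100 KB, illustrative
            .build();
    }
}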
2024-12-10T14:26:05,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742048_1224 (size=22561)
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:05,662 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:05,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:05,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840825679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840825680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,685 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840825681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840825684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840825682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-10T14:26:05,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840825786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840825786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840825786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840825786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840825787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840825989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840825989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840825989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:05,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840825989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:05,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840825989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:06,038 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=357, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/b54f9927fe5140288645cac1d7e3f6cb 2024-12-10T14:26:06,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/3ae8ee3d39c64c1cb203859f507a92d0 is 50, key is test_row_0/B:col10/1733840765025/Put/seqid=0 2024-12-10T14:26:06,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742049_1225 (size=9857) 2024-12-10T14:26:06,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-10T14:26:06,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:06,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:06,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840826292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:06,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840826291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:06,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:06,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:06,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840826293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:06,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840826293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:06,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:06,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840826294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:06,455 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/3ae8ee3d39c64c1cb203859f507a92d0 2024-12-10T14:26:06,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/34659317362947308c7960ce981a239e is 50, key is test_row_0/C:col10/1733840765025/Put/seqid=0 2024-12-10T14:26:06,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742050_1226 (size=9857) 2024-12-10T14:26:06,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:06,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:06,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840826797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:06,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840826796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:06,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:06,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840826797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:06,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:06,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840826799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:06,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:06,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840826799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:06,869 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/34659317362947308c7960ce981a239e 2024-12-10T14:26:06,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/b54f9927fe5140288645cac1d7e3f6cb as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b54f9927fe5140288645cac1d7e3f6cb 2024-12-10T14:26:06,878 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b54f9927fe5140288645cac1d7e3f6cb, entries=100, sequenceid=357, filesize=22.0 K 2024-12-10T14:26:06,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/3ae8ee3d39c64c1cb203859f507a92d0 as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3ae8ee3d39c64c1cb203859f507a92d0
2024-12-10T14:26:06,884 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3ae8ee3d39c64c1cb203859f507a92d0, entries=100, sequenceid=357, filesize=9.6 K
2024-12-10T14:26:06,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/34659317362947308c7960ce981a239e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/34659317362947308c7960ce981a239e
2024-12-10T14:26:06,889 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/34659317362947308c7960ce981a239e, entries=100, sequenceid=357, filesize=9.6 K
2024-12-10T14:26:06,891 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 18673fce5a633353d821462d51dbbd4b in 1303ms, sequenceid=357, compaction requested=false
2024-12-10T14:26:06,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b:
2024-12-10T14:26:06,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.
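The RegionTooBusyException warnings above come from HRegion.checkResources rejecting puts once the region's memstore passes its blocking size (reported as 512.0 K in this test run) while flushes are still in flight; writers are expected to back off and retry until the flush completes. Below is a minimal client-side sketch of that pattern. It is illustrative only: the table, row, family, and qualifier names are taken from this log, but the connection setup, retry budget, and backoff interval are assumptions, and the stock HBase client may already retry this exception internally (surfacing it wrapped in a RetriesExhaustedException) depending on its retry configuration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempt = 0;
      while (true) {
        try {
          table.put(put);               // rejected while the memstore is over its blocking limit
          return;
        } catch (RegionTooBusyException e) {
          attempt++;
          if (attempt > 5) {            // illustrative retry budget, not taken from the test
            throw e;
          }
          Thread.sleep(200L * attempt); // simple linear backoff before retrying
        }
      }
    }
  }
}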
2024-12-10T14:26:06,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-12-10T14:26:06,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-12-10T14:26:06,893 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-10T14:26:06,893 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7630 sec 2024-12-10T14:26:06,895 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.7670 sec 2024-12-10T14:26:07,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-10T14:26:07,233 INFO [Thread-619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-12-10T14:26:07,234 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:26:07,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-12-10T14:26:07,235 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:26:07,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-10T14:26:07,236 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:26:07,236 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:26:07,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-10T14:26:07,388 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:07,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-10T14:26:07,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:26:07,389 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-10T14:26:07,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:26:07,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:07,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:26:07,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:07,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:26:07,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:07,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210fa7d1ac8823e4b6b990d3df35769b4dd_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840765678/Put/seqid=0 2024-12-10T14:26:07,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742051_1227 (size=12454) 2024-12-10T14:26:07,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-10T14:26:07,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:07,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:07,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:07,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:07,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840827803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:07,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:07,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840827804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:07,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:07,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840827804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:07,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:07,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840827805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:07,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:07,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840827806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:07,810 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210fa7d1ac8823e4b6b990d3df35769b4dd_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fa7d1ac8823e4b6b990d3df35769b4dd_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:07,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/5b16d39be4dc4b3a8ed8e5eaaad2ae7f, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:07,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/5b16d39be4dc4b3a8ed8e5eaaad2ae7f is 175, key is test_row_0/A:col10/1733840765678/Put/seqid=0 2024-12-10T14:26:07,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742052_1228 (size=31255) 2024-12-10T14:26:07,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-10T14:26:07,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:07,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840827906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:07,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:07,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840827907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:07,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:07,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840827907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:07,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:07,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840827907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:08,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:08,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840828108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:08,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:08,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840828109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:08,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:08,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840828110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:08,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:08,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840828110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:08,217 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=386, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/5b16d39be4dc4b3a8ed8e5eaaad2ae7f 2024-12-10T14:26:08,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/3696ef225ec546429baaaffe26b9feff is 50, key is test_row_0/B:col10/1733840765678/Put/seqid=0 2024-12-10T14:26:08,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742053_1229 (size=12301) 2024-12-10T14:26:08,238 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=386 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/3696ef225ec546429baaaffe26b9feff 2024-12-10T14:26:08,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/519566d4322141f7856c962ddee47dd8 is 50, key is test_row_0/C:col10/1733840765678/Put/seqid=0 2024-12-10T14:26:08,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742054_1230 (size=12301) 2024-12-10T14:26:08,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=59 2024-12-10T14:26:08,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:08,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840828410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:08,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:08,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840828413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:08,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:08,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840828413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:08,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:08,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840828430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:08,650 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=386 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/519566d4322141f7856c962ddee47dd8 2024-12-10T14:26:08,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/5b16d39be4dc4b3a8ed8e5eaaad2ae7f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/5b16d39be4dc4b3a8ed8e5eaaad2ae7f 2024-12-10T14:26:08,660 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/5b16d39be4dc4b3a8ed8e5eaaad2ae7f, entries=150, sequenceid=386, filesize=30.5 K 2024-12-10T14:26:08,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/3696ef225ec546429baaaffe26b9feff as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3696ef225ec546429baaaffe26b9feff 2024-12-10T14:26:08,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,662 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,665 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3696ef225ec546429baaaffe26b9feff, entries=150, sequenceid=386, filesize=12.0 K 2024-12-10T14:26:08,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/519566d4322141f7856c962ddee47dd8 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/519566d4322141f7856c962ddee47dd8 2024-12-10T14:26:08,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T14:26:08,671 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/519566d4322141f7856c962ddee47dd8, entries=150, sequenceid=386, filesize=12.0 K
2024-12-10T14:26:08,672 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 18673fce5a633353d821462d51dbbd4b in 1283ms, sequenceid=386, compaction requested=true
2024-12-10T14:26:08,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b:
2024-12-10T14:26:08,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.
2024-12-10T14:26:08,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60
2024-12-10T14:26:08,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=60
2024-12-10T14:26:08,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59
2024-12-10T14:26:08,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4380 sec
2024-12-10T14:26:08,677 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 1.4420 sec
2024-12-10T14:26:08,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... same DEBUG entry "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 (port 46699) from 2024-12-10T14:26:08,852 through 2024-12-10T14:26:08,932 ...]
2024-12-10T14:26:08,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:08,950 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T14:26:08,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:26:08,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:08,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:26:08,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:08,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:26:08,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:08,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,961 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210fddab759ba2a440b995ae9efb474a970_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840768950/Put/seqid=0 2024-12-10T14:26:08,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:08,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:08,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840828985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:08,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742055_1231 (size=17534) 2024-12-10T14:26:08,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:08,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840828985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:08,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:08,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840828986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:08,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:08,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840828987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:09,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:09,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840829091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:09,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:09,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840829091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:09,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:09,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840829091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:09,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:09,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840829092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:09,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:09,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840829293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:09,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:09,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840829293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:09,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:09,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840829294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:09,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:09,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840829296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:09,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-10T14:26:09,340 INFO [Thread-619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-10T14:26:09,341 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:26:09,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-12-10T14:26:09,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-10T14:26:09,343 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:26:09,343 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:26:09,343 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:26:09,390 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:09,395 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210fddab759ba2a440b995ae9efb474a970_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fddab759ba2a440b995ae9efb474a970_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:09,395 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/7c9a954b02e2413f82aff0835b605537, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:09,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/7c9a954b02e2413f82aff0835b605537 is 175, key is test_row_0/A:col10/1733840768950/Put/seqid=0 2024-12-10T14:26:09,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742056_1232 (size=48639) 2024-12-10T14:26:09,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-10T14:26:09,495 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:09,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-10T14:26:09,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:09,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:09,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:09,496 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:09,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:09,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:09,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:09,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840829595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:09,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:09,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840829595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:09,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:09,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840829596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:09,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:09,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840829599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:09,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-10T14:26:09,648 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:09,649 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-10T14:26:09,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:09,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:09,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:09,649 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:09,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:09,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:09,801 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:09,801 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=398, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/7c9a954b02e2413f82aff0835b605537 2024-12-10T14:26:09,802 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-10T14:26:09,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:09,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:09,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:09,802 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:09,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:09,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:09,810 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/324e4ba8d9f941759d1aaa55048b4c31 is 50, key is test_row_0/B:col10/1733840768950/Put/seqid=0 2024-12-10T14:26:09,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742057_1233 (size=12301) 2024-12-10T14:26:09,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:09,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48046 deadline: 1733840829813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:09,815 DEBUG [Thread-617 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4135 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., hostname=db1d50717577,46699,1733840717757, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T14:26:09,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-10T14:26:09,955 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:09,955 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-10T14:26:09,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:09,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:09,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:09,956 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:09,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:09,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:10,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:10,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840830098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:10,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:10,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840830101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:10,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:10,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840830102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:10,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:10,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840830104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:10,108 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:10,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-10T14:26:10,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:10,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:10,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:10,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:10,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:10,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:10,215 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/324e4ba8d9f941759d1aaa55048b4c31 2024-12-10T14:26:10,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/292cb55d988647028e40b9e493d7bd86 is 50, key is test_row_0/C:col10/1733840768950/Put/seqid=0 2024-12-10T14:26:10,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742058_1234 (size=12301) 2024-12-10T14:26:10,260 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:10,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-10T14:26:10,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:10,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:10,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:10,261 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:10,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:10,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:10,413 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:10,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-10T14:26:10,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:10,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:10,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:10,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:10,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:10,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:10,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-10T14:26:10,566 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:10,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-10T14:26:10,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:10,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:10,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:10,567 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:10,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:10,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
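The repeated pid=62 failures above are the master re-dispatching a remote flush procedure while the region is still busy with an earlier flush: the region server answers "NOT flushing ... as already flushing", the callable fails with "Unable to complete flush", and the master logs "Remote procedure failed" and tries again until the in-flight flush finishes (visible in the next entries). As a minimal sketch, assuming a standard client setup and the table name seen in the log (connection details here are illustrative, not taken from this run), such a flush is typically requested through the public Admin API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask the master to flush the table's regions. In this 2.7.0-SNAPSHOT build the
                // flush appears to be driven by a master procedure that dispatches
                // FlushRegionCallable to each region server, i.e. the remote call that keeps
                // failing as pid=62 in the log above while an earlier flush is still running.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }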
2024-12-10T14:26:10,629 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/292cb55d988647028e40b9e493d7bd86 2024-12-10T14:26:10,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/7c9a954b02e2413f82aff0835b605537 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/7c9a954b02e2413f82aff0835b605537 2024-12-10T14:26:10,638 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/7c9a954b02e2413f82aff0835b605537, entries=250, sequenceid=398, filesize=47.5 K 2024-12-10T14:26:10,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/324e4ba8d9f941759d1aaa55048b4c31 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/324e4ba8d9f941759d1aaa55048b4c31 2024-12-10T14:26:10,643 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/324e4ba8d9f941759d1aaa55048b4c31, entries=150, sequenceid=398, filesize=12.0 K 2024-12-10T14:26:10,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/292cb55d988647028e40b9e493d7bd86 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/292cb55d988647028e40b9e493d7bd86 2024-12-10T14:26:10,648 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/292cb55d988647028e40b9e493d7bd86, entries=150, sequenceid=398, filesize=12.0 K 2024-12-10T14:26:10,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 18673fce5a633353d821462d51dbbd4b in 1699ms, sequenceid=398, compaction requested=true 2024-12-10T14:26:10,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:10,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:26:10,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:10,650 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:26:10,650 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:26:10,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:26:10,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:10,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:26:10,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:10,651 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134528 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:26:10,651 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47578 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:26:10,651 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/A is initiating minor compaction (all files) 2024-12-10T14:26:10,651 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/B is initiating minor compaction (all files) 2024-12-10T14:26:10,651 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/A in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:10,651 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/B in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
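The "Exploring compaction algorithm has selected 4 files ... after considering 3 permutations with 3 in ratio" entries come from HBase's ExploringCompactionPolicy, which scans contiguous windows of eligible store files, keeps only windows whose files are mutually "in ratio" (no file larger than the ratio times the combined size of the rest of the window), and prefers the window that compacts the most files for the least rewritten data. The following is a toy sketch of that ratio check under assumed defaults (ratio 1.2, 3 to 10 files per window); it is a simplified illustration, not the actual HBase implementation, and the example sizes only roughly match the B-store files listed below.

    import java.util.ArrayList;
    import java.util.List;

    /** Toy sketch of ratio-based store-file selection (not the real ExploringCompactionPolicy). */
    public class RatioSelectionSketch {
        static boolean filesInRatio(List<Long> window, double ratio) {
            long total = window.stream().mapToLong(Long::longValue).sum();
            // A window qualifies only if no single file dominates it:
            // each file must be <= ratio * (total size of the other files in the window).
            return window.stream().allMatch(size -> size <= ratio * (total - size));
        }

        static List<Long> select(List<Long> sizes, double ratio, int minFiles, int maxFiles) {
            List<Long> best = new ArrayList<>();
            long bestTotal = Long.MAX_VALUE;
            for (int start = 0; start < sizes.size(); start++) {
                for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
                    List<Long> window = sizes.subList(start, end);
                    if (!filesInRatio(window, ratio)) continue;
                    long total = window.stream().mapToLong(Long::longValue).sum();
                    // Prefer compacting more files; break ties by rewriting fewer bytes.
                    if (window.size() > best.size() || (window.size() == best.size() && total < bestTotal)) {
                        best = new ArrayList<>(window);
                        bestTotal = total;
                    }
                }
            }
            return best;
        }

        public static void main(String[] args) {
            // Sizes in bytes, roughly matching the four B-store files (12.8 K, 9.6 K, 12.0 K, 12.0 K).
            List<Long> sizes = List.of(13_107L, 9_830L, 12_288L, 12_301L);
            System.out.println(select(sizes, 1.2, 3, 10));  // all four files end up in one window
        }
    }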
2024-12-10T14:26:10,652 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/06031c52223e46ea87203a1f8c9e2ceb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3ae8ee3d39c64c1cb203859f507a92d0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3696ef225ec546429baaaffe26b9feff, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/324e4ba8d9f941759d1aaa55048b4c31] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=46.5 K 2024-12-10T14:26:10,652 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/6502997d43684d4191f48c8121dc9650, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b54f9927fe5140288645cac1d7e3f6cb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/5b16d39be4dc4b3a8ed8e5eaaad2ae7f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/7c9a954b02e2413f82aff0835b605537] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=131.4 K 2024-12-10T14:26:10,652 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:10,652 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/6502997d43684d4191f48c8121dc9650, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b54f9927fe5140288645cac1d7e3f6cb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/5b16d39be4dc4b3a8ed8e5eaaad2ae7f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/7c9a954b02e2413f82aff0835b605537] 2024-12-10T14:26:10,652 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 06031c52223e46ea87203a1f8c9e2ceb, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733840765022 2024-12-10T14:26:10,652 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6502997d43684d4191f48c8121dc9650, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733840765022 2024-12-10T14:26:10,652 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ae8ee3d39c64c1cb203859f507a92d0, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733840765025 2024-12-10T14:26:10,653 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b54f9927fe5140288645cac1d7e3f6cb, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733840765025 2024-12-10T14:26:10,653 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b16d39be4dc4b3a8ed8e5eaaad2ae7f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1733840765678 2024-12-10T14:26:10,654 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c9a954b02e2413f82aff0835b605537, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733840767803 2024-12-10T14:26:10,654 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 3696ef225ec546429baaaffe26b9feff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1733840765678 2024-12-10T14:26:10,655 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 324e4ba8d9f941759d1aaa55048b4c31, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733840767803 2024-12-10T14:26:10,664 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:10,667 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#B#compaction#190 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:10,669 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/ab7dae0cd21d48f493d39cd1175d9f18 is 50, key is test_row_0/B:col10/1733840768950/Put/seqid=0 2024-12-10T14:26:10,670 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210a76cf590aed54e55905c6524025a47d2_18673fce5a633353d821462d51dbbd4b store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:10,672 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210a76cf590aed54e55905c6524025a47d2_18673fce5a633353d821462d51dbbd4b, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:10,672 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210a76cf590aed54e55905c6524025a47d2_18673fce5a633353d821462d51dbbd4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:10,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742059_1235 (size=13255) 2024-12-10T14:26:10,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742060_1236 (size=4469) 2024-12-10T14:26:10,688 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#A#compaction#189 average throughput is 1.02 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:10,689 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/3977b7a642794e35b5fa8a07d13c9525 is 175, key is test_row_0/A:col10/1733840768950/Put/seqid=0 2024-12-10T14:26:10,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742061_1237 (size=32209) 2024-12-10T14:26:10,719 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:10,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-10T14:26:10,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:26:10,720 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T14:26:10,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:26:10,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:10,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:26:10,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:10,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:26:10,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:10,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412107c057feb5dbf45178463f350b45be6dd_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840768985/Put/seqid=0 2024-12-10T14:26:10,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742062_1238 (size=12454) 2024-12-10T14:26:11,090 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/ab7dae0cd21d48f493d39cd1175d9f18 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/ab7dae0cd21d48f493d39cd1175d9f18 2024-12-10T14:26:11,096 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/B of 18673fce5a633353d821462d51dbbd4b into ab7dae0cd21d48f493d39cd1175d9f18(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
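The "CompactingMemStore ... FLUSHING TO DISK" and "Swapping pipeline suffix" entries indicate the column families use an in-memory compaction pipeline, and the HMobStore/DefaultMobStoreFlusher/DefaultMobStoreCompactor entries show that family A is MOB-enabled. As a sketch only, assuming a schema consistent with those log lines (the real TestAcidGuarantees table definition and the MOB threshold are not read from this log), such a table could be declared through the public descriptor builders:

    import java.io.IOException;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SchemaSketch {
        static void createTable(Admin admin) throws IOException {
            // Family A: MOB-enabled, so large values go to separate MOB files; that is why the
            // flush above runs through DefaultMobStoreFlusher and renames a file under /mobdir.
            ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(100)  // assumed threshold in bytes, for illustration
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .build();
            // Families B and C: plain stores sharing the same in-memory compaction pipeline.
            ColumnFamilyDescriptor familyB = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("B"))
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .build();
            ColumnFamilyDescriptor familyC = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("C"))
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .build();
            admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(familyA)
                .setColumnFamily(familyB)
                .setColumnFamily(familyC)
                .build());
        }
    }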
2024-12-10T14:26:11,096 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:11,096 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/B, priority=12, startTime=1733840770650; duration=0sec 2024-12-10T14:26:11,096 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:11,096 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:B 2024-12-10T14:26:11,096 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:26:11,099 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/3977b7a642794e35b5fa8a07d13c9525 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/3977b7a642794e35b5fa8a07d13c9525 2024-12-10T14:26:11,099 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47578 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:26:11,099 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/C is initiating minor compaction (all files) 2024-12-10T14:26:11,099 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/C in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:26:11,099 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/50e7c334eb994fc78e627d1c49b08930, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/34659317362947308c7960ce981a239e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/519566d4322141f7856c962ddee47dd8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/292cb55d988647028e40b9e493d7bd86] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=46.5 K 2024-12-10T14:26:11,101 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 50e7c334eb994fc78e627d1c49b08930, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1733840765022 2024-12-10T14:26:11,101 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 34659317362947308c7960ce981a239e, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733840765025 2024-12-10T14:26:11,101 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 519566d4322141f7856c962ddee47dd8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1733840765678 2024-12-10T14:26:11,102 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 292cb55d988647028e40b9e493d7bd86, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733840767803 2024-12-10T14:26:11,105 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/A of 18673fce5a633353d821462d51dbbd4b into 3977b7a642794e35b5fa8a07d13c9525(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:26:11,105 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:11,105 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/A, priority=12, startTime=1733840770650; duration=0sec 2024-12-10T14:26:11,105 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:11,105 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:A 2024-12-10T14:26:11,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:11,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. as already flushing 2024-12-10T14:26:11,112 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#C#compaction#192 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:11,113 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/26293a511ba44a799a32962fc448a6e1 is 50, key is test_row_0/C:col10/1733840768950/Put/seqid=0 2024-12-10T14:26:11,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742063_1239 (size=13255) 2024-12-10T14:26:11,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:11,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840831116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:11,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:11,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840831118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:11,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:11,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840831118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:11,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:11,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840831118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:11,126 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/26293a511ba44a799a32962fc448a6e1 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/26293a511ba44a799a32962fc448a6e1 2024-12-10T14:26:11,131 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/C of 18673fce5a633353d821462d51dbbd4b into 26293a511ba44a799a32962fc448a6e1(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
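The RegionTooBusyException warnings above are memstore backpressure: while the region's pending writes sit over the blocking limit (512 K in this test configuration), incoming Mutate calls are rejected until the flush in progress drains the memstore. The standard HBase client normally retries these internally, so the following is purely illustrative; it sketches, under assumed table and row names, how a caller using the raw Table API could back off explicitly if the exception surfaces to application code.

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
        /** Retry a put with exponential backoff when the region reports memstore backpressure. */
        static void putWithBackoff(Connection conn, Put put) throws Exception {
            try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                long sleepMs = 100;
                for (int attempt = 0; attempt < 8; attempt++) {
                    try {
                        table.put(put);
                        return;
                    } catch (RegionTooBusyException e) {
                        // Region is over its memstore blocking limit; give the flush time to catch up.
                        Thread.sleep(sleepMs);
                        sleepMs = Math.min(sleepMs * 2, 5_000);
                    }
                }
                throw new java.io.IOException("region stayed too busy after retries");
            }
        }

        static Put examplePut() {
            // Row and value are placeholders shaped like the keys seen in the log.
            return new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
        }
    }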
2024-12-10T14:26:11,131 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:11,131 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/C, priority=12, startTime=1733840770650; duration=0sec 2024-12-10T14:26:11,131 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:11,131 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:C 2024-12-10T14:26:11,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:11,137 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412107c057feb5dbf45178463f350b45be6dd_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412107c057feb5dbf45178463f350b45be6dd_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:11,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/e3f276d536bb419fbd8b56a0907cfc0d, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:11,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/e3f276d536bb419fbd8b56a0907cfc0d is 175, key is test_row_0/A:col10/1733840768985/Put/seqid=0 2024-12-10T14:26:11,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742064_1240 (size=31255) 2024-12-10T14:26:11,144 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=422, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/e3f276d536bb419fbd8b56a0907cfc0d 2024-12-10T14:26:11,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/d5474008da9c479cad2a497422180f3c is 50, key is test_row_0/B:col10/1733840768985/Put/seqid=0 2024-12-10T14:26:11,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742065_1241 (size=12301) 2024-12-10T14:26:11,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:11,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840831221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:11,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:11,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840831222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:11,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:11,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840831222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:11,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:11,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840831223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:11,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:11,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840831423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:11,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:11,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840831425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:11,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:11,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840831426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:11,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840831427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:11,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-10T14:26:11,558 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/d5474008da9c479cad2a497422180f3c 2024-12-10T14:26:11,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/873620a8ea1247c1a8e9cb193b0f1f5b is 50, key is test_row_0/C:col10/1733840768985/Put/seqid=0 2024-12-10T14:26:11,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742066_1242 (size=12301) 2024-12-10T14:26:11,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:11,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48014 deadline: 1733840831726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:11,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:11,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48034 deadline: 1733840831727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:11,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:11,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48070 deadline: 1733840831728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:11,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:11,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48026 deadline: 1733840831728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:11,871 DEBUG [Thread-624 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c38ee58 to 127.0.0.1:58494 2024-12-10T14:26:11,871 DEBUG [Thread-624 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:11,871 DEBUG [Thread-622 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c7940d9 to 127.0.0.1:58494 2024-12-10T14:26:11,871 DEBUG [Thread-622 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:11,872 DEBUG [Thread-620 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3b727d6e to 127.0.0.1:58494 2024-12-10T14:26:11,872 DEBUG [Thread-620 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:11,875 DEBUG [Thread-626 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x630684bf to 127.0.0.1:58494 2024-12-10T14:26:11,875 DEBUG [Thread-626 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:11,970 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/873620a8ea1247c1a8e9cb193b0f1f5b 2024-12-10T14:26:11,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/e3f276d536bb419fbd8b56a0907cfc0d as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/e3f276d536bb419fbd8b56a0907cfc0d 2024-12-10T14:26:11,978 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/e3f276d536bb419fbd8b56a0907cfc0d, entries=150, sequenceid=422, filesize=30.5 K 2024-12-10T14:26:11,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/d5474008da9c479cad2a497422180f3c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/d5474008da9c479cad2a497422180f3c 2024-12-10T14:26:11,982 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/d5474008da9c479cad2a497422180f3c, entries=150, sequenceid=422, filesize=12.0 K 2024-12-10T14:26:11,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/873620a8ea1247c1a8e9cb193b0f1f5b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/873620a8ea1247c1a8e9cb193b0f1f5b 2024-12-10T14:26:11,986 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/873620a8ea1247c1a8e9cb193b0f1f5b, entries=150, sequenceid=422, filesize=12.0 K 2024-12-10T14:26:11,986 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 18673fce5a633353d821462d51dbbd4b in 1266ms, sequenceid=422, compaction requested=false 2024-12-10T14:26:11,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:11,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
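The burst of RegionTooBusyException warnings above shows the region server rejecting writes while the memstore of region 18673fce5a633353d821462d51dbbd4b sat over its 512.0 K blocking limit, until the flush at sequenceid=422 drained it. The stock HBase client already retries this pushback internally, so the following is only a rough sketch of doing it explicitly: the table, row and family names are taken from the log, while the retry budget and backoff values are assumptions.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;                              // assumed initial backoff
      for (int attempt = 1; attempt <= 5; attempt++) {    // assumed retry budget
        try {
          table.put(put);                                 // rejected while the memstore is over its blocking limit
          break;
        } catch (IOException busy) {                      // RegionTooBusyException, possibly wrapped by client retries
          Thread.sleep(backoffMs);
          backoffMs *= 2;                                 // simple exponential backoff
        }
      }
    }
  }
}

Note that the busy-region error may arrive wrapped by the client's own retry machinery rather than as a bare RegionTooBusyException, which is why the sketch catches IOException broadly.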
2024-12-10T14:26:11,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-12-10T14:26:11,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-12-10T14:26:11,989 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-10T14:26:11,989 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6450 sec 2024-12-10T14:26:11,990 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 2.6480 sec 2024-12-10T14:26:12,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:12,231 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T14:26:12,231 DEBUG [Thread-609 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7bad2e85 to 127.0.0.1:58494 2024-12-10T14:26:12,231 DEBUG [Thread-609 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:12,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:26:12,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:12,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:26:12,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:12,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:26:12,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:12,233 DEBUG [Thread-611 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x328f994d to 127.0.0.1:58494 2024-12-10T14:26:12,233 DEBUG [Thread-611 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:12,234 DEBUG [Thread-615 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x465dc764 to 127.0.0.1:58494 2024-12-10T14:26:12,235 DEBUG [Thread-615 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:12,235 DEBUG [Thread-613 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x19a533a3 to 127.0.0.1:58494 2024-12-10T14:26:12,235 DEBUG [Thread-613 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:12,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412102ed6c7d22e1e4f2a9dc34647961c5a4a_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840772230/Put/seqid=0 2024-12-10T14:26:12,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:41075 is added to blk_1073742067_1243 (size=12454) 2024-12-10T14:26:12,646 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:12,650 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412102ed6c7d22e1e4f2a9dc34647961c5a4a_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102ed6c7d22e1e4f2a9dc34647961c5a4a_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:12,651 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/bca8694281f24f5e88ac8b5b40744877, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:12,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/bca8694281f24f5e88ac8b5b40744877 is 175, key is test_row_0/A:col10/1733840772230/Put/seqid=0 2024-12-10T14:26:12,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742068_1244 (size=31255) 2024-12-10T14:26:13,056 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=438, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/bca8694281f24f5e88ac8b5b40744877 2024-12-10T14:26:13,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/dfb2754774204fe6a8ec8a7750e92a24 is 50, key is test_row_0/B:col10/1733840772230/Put/seqid=0 2024-12-10T14:26:13,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742069_1245 (size=12301) 2024-12-10T14:26:13,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-10T14:26:13,448 INFO [Thread-619 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-12-10T14:26:13,466 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/dfb2754774204fe6a8ec8a7750e92a24 2024-12-10T14:26:13,473 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/01ffe5bba7fa450f8c057f2130a0194a is 50, key is test_row_0/C:col10/1733840772230/Put/seqid=0 2024-12-10T14:26:13,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742070_1246 (size=12301) 2024-12-10T14:26:13,825 DEBUG [Thread-617 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x247c0c93 to 127.0.0.1:58494 2024-12-10T14:26:13,825 DEBUG [Thread-617 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:13,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-10T14:26:13,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-12-10T14:26:13,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-12-10T14:26:13,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 71 2024-12-10T14:26:13,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 73 2024-12-10T14:26:13,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-12-10T14:26:13,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-10T14:26:13,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7279 2024-12-10T14:26:13,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7457 2024-12-10T14:26:13,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-10T14:26:13,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3139 2024-12-10T14:26:13,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9417 rows 2024-12-10T14:26:13,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3137 2024-12-10T14:26:13,825 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9411 rows 2024-12-10T14:26:13,825 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T14:26:13,825 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x350b322d to 127.0.0.1:58494 2024-12-10T14:26:13,825 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:13,831 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-10T14:26:13,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-10T14:26:13,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-10T14:26:13,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T14:26:13,835 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840773835"}]},"ts":"1733840773835"} 2024-12-10T14:26:13,836 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-10T14:26:13,838 INFO [PEWorker-4 {}] 
procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-10T14:26:13,838 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T14:26:13,839 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=18673fce5a633353d821462d51dbbd4b, UNASSIGN}] 2024-12-10T14:26:13,840 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=18673fce5a633353d821462d51dbbd4b, UNASSIGN 2024-12-10T14:26:13,840 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=18673fce5a633353d821462d51dbbd4b, regionState=CLOSING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:26:13,841 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:26:13,841 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; CloseRegionProcedure 18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:26:13,877 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/01ffe5bba7fa450f8c057f2130a0194a 2024-12-10T14:26:13,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/bca8694281f24f5e88ac8b5b40744877 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/bca8694281f24f5e88ac8b5b40744877 2024-12-10T14:26:13,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/bca8694281f24f5e88ac8b5b40744877, entries=150, sequenceid=438, filesize=30.5 K 2024-12-10T14:26:13,885 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/dfb2754774204fe6a8ec8a7750e92a24 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dfb2754774204fe6a8ec8a7750e92a24 2024-12-10T14:26:13,888 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dfb2754774204fe6a8ec8a7750e92a24, entries=150, sequenceid=438, filesize=12.0 K 2024-12-10T14:26:13,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/01ffe5bba7fa450f8c057f2130a0194a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/01ffe5bba7fa450f8c057f2130a0194a 2024-12-10T14:26:13,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/01ffe5bba7fa450f8c057f2130a0194a, entries=150, sequenceid=438, filesize=12.0 K 2024-12-10T14:26:13,892 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=26.84 KB/27480 for 18673fce5a633353d821462d51dbbd4b in 1661ms, sequenceid=438, compaction requested=true 2024-12-10T14:26:13,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:13,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:26:13,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:13,893 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:13,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:26:13,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:13,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 18673fce5a633353d821462d51dbbd4b:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:26:13,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:13,893 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:13,894 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94719 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:13,894 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/A is initiating minor compaction (all files) 2024-12-10T14:26:13,894 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/A in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
2024-12-10T14:26:13,894 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/3977b7a642794e35b5fa8a07d13c9525, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/e3f276d536bb419fbd8b56a0907cfc0d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/bca8694281f24f5e88ac8b5b40744877] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=92.5 K 2024-12-10T14:26:13,894 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:13,894 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:13,894 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/3977b7a642794e35b5fa8a07d13c9525, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/e3f276d536bb419fbd8b56a0907cfc0d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/bca8694281f24f5e88ac8b5b40744877] 2024-12-10T14:26:13,894 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/B is initiating minor compaction (all files) 2024-12-10T14:26:13,894 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/B in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
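At this point the flush has left three store files in each family and the ExploringCompactionPolicy is selecting all of them for minor compactions of A (through the MOB-aware compactor) and B; the file lists for the B selection follow in the next entries. Compactions like these can also be requested explicitly from a client. A small illustrative sketch against the standard Admin API, where only the table and family names are taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Ask for a compaction of just the A family, like the one selected above.
      admin.compact(table, Bytes.toBytes("A"));
      // Or rewrite every store file in the table in one major compaction.
      admin.majorCompact(table);
    }
  }
}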
2024-12-10T14:26:13,894 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/ab7dae0cd21d48f493d39cd1175d9f18, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/d5474008da9c479cad2a497422180f3c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dfb2754774204fe6a8ec8a7750e92a24] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=37.0 K 2024-12-10T14:26:13,894 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3977b7a642794e35b5fa8a07d13c9525, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733840767803 2024-12-10T14:26:13,894 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting ab7dae0cd21d48f493d39cd1175d9f18, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733840767803 2024-12-10T14:26:13,895 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3f276d536bb419fbd8b56a0907cfc0d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1733840768977 2024-12-10T14:26:13,895 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting d5474008da9c479cad2a497422180f3c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1733840768977 2024-12-10T14:26:13,895 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting bca8694281f24f5e88ac8b5b40744877, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1733840771116 2024-12-10T14:26:13,895 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting dfb2754774204fe6a8ec8a7750e92a24, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1733840771116 2024-12-10T14:26:13,904 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#B#compaction#198 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:13,905 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/da3606be43454f46acdc4219fe9690f8 is 50, key is test_row_0/B:col10/1733840772230/Put/seqid=0 2024-12-10T14:26:13,905 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:13,909 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210cbfcb1ddfa7049568c1a0548500ee292_18673fce5a633353d821462d51dbbd4b store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:13,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742071_1247 (size=13357) 2024-12-10T14:26:13,916 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/da3606be43454f46acdc4219fe9690f8 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/da3606be43454f46acdc4219fe9690f8 2024-12-10T14:26:13,922 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/B of 18673fce5a633353d821462d51dbbd4b into da3606be43454f46acdc4219fe9690f8(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
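Family A of this table is MOB-enabled, which is why its flushes and compactions route through HMobStore and DefaultMobStoreCompactor above (here the selection contains no MOB cells, so the freshly created MOB writer is aborted a few entries later). As a purely illustrative sketch of how such a family is declared, the MobDemo table name and the 100-byte threshold below are invented for the example; the descriptors the test actually uses are not shown in this excerpt.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("MobDemo");   // hypothetical table for the sketch
      admin.createTable(TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)        // route large values for this family through the MOB path
              .setMobThreshold(100L)      // cells above 100 bytes become MOB cells (assumed threshold)
              .build())
          .build());
    }
  }
}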
2024-12-10T14:26:13,922 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:13,922 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/B, priority=13, startTime=1733840773893; duration=0sec 2024-12-10T14:26:13,922 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:13,922 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:B 2024-12-10T14:26:13,922 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:13,923 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:13,923 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 18673fce5a633353d821462d51dbbd4b/C is initiating minor compaction (all files) 2024-12-10T14:26:13,923 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 18673fce5a633353d821462d51dbbd4b/C in TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:13,923 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/26293a511ba44a799a32962fc448a6e1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/873620a8ea1247c1a8e9cb193b0f1f5b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/01ffe5bba7fa450f8c057f2130a0194a] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp, totalSize=37.0 K 2024-12-10T14:26:13,924 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 26293a511ba44a799a32962fc448a6e1, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733840767803 2024-12-10T14:26:13,924 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 873620a8ea1247c1a8e9cb193b0f1f5b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1733840768977 2024-12-10T14:26:13,925 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 01ffe5bba7fa450f8c057f2130a0194a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1733840771116 2024-12-10T14:26:13,933 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort 
size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210cbfcb1ddfa7049568c1a0548500ee292_18673fce5a633353d821462d51dbbd4b, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:13,934 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210cbfcb1ddfa7049568c1a0548500ee292_18673fce5a633353d821462d51dbbd4b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:13,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T14:26:13,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742072_1248 (size=4469) 2024-12-10T14:26:13,939 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#C#compaction#200 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:13,940 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/4accb6d59b31446fb708897d78b5479f is 50, key is test_row_0/C:col10/1733840772230/Put/seqid=0 2024-12-10T14:26:13,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742073_1249 (size=13357) 2024-12-10T14:26:13,993 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:13,993 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(124): Close 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:13,993 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:26:13,993 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1681): Closing 18673fce5a633353d821462d51dbbd4b, disabling compactions & flushes 2024-12-10T14:26:13,993 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1942): waiting for 2 compactions to complete for region TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:14,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T14:26:14,339 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 18673fce5a633353d821462d51dbbd4b#A#compaction#199 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:14,340 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/e7e84dbba52c4560acc6125ef8285280 is 175, key is test_row_0/A:col10/1733840772230/Put/seqid=0 2024-12-10T14:26:14,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742074_1250 (size=32311) 2024-12-10T14:26:14,348 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/4accb6d59b31446fb708897d78b5479f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/4accb6d59b31446fb708897d78b5479f 2024-12-10T14:26:14,352 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/C of 18673fce5a633353d821462d51dbbd4b into 4accb6d59b31446fb708897d78b5479f(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:26:14,352 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:14,352 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/C, priority=13, startTime=1733840773893; duration=0sec 2024-12-10T14:26:14,352 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:14,352 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:C 2024-12-10T14:26:14,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T14:26:14,748 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/e7e84dbba52c4560acc6125ef8285280 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/e7e84dbba52c4560acc6125ef8285280 2024-12-10T14:26:14,752 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 18673fce5a633353d821462d51dbbd4b/A of 18673fce5a633353d821462d51dbbd4b into e7e84dbba52c4560acc6125ef8285280(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
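With the backlog compacted away, it is worth noting where the 512.0 K blocking limit in the earlier RegionTooBusyException warnings comes from: it is the region's memstore flush size multiplied by the blocking multiplier. A hedged sketch of how a test harness might shrink these when assembling its configuration; the property keys are standard HBase settings, but the 128 KB flush size is an assumption for illustration, not a value read from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfigSketch {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush each memstore once it reaches 128 KB (assumed value for illustration).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Block new writes once the memstore grows past 4x the flush size, i.e. 512 KB,
    // matching the "Over memstore limit=512.0 K" seen in the warnings above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}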
2024-12-10T14:26:14,752 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:14,752 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b., storeName=18673fce5a633353d821462d51dbbd4b/A, priority=13, startTime=1733840773893; duration=0sec 2024-12-10T14:26:14,752 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:14,752 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 2024-12-10T14:26:14,752 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:14,752 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. after waiting 0 ms 2024-12-10T14:26:14,752 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 18673fce5a633353d821462d51dbbd4b:A 2024-12-10T14:26:14,752 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
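The RS_CLOSE_REGION entries (pid=66) interleaved above record the close sequence for the region: Close, disabling compactions & flushes, waiting for 2 compactions, acquiring the close lock, then disabling updates. As an illustration only, this sketch re-derives that timeline from the dump; it assumes entries can be re-split on the leading timestamp (because several of them share a physical line here), and the pid=66 filter is taken from the log itself.

#!/usr/bin/env python3
# Illustrative sketch only: reconstruct the region-close timeline from the
# RS_CLOSE_REGION entries (pid=66 in this dump). Needs Python 3.7+ because it
# splits on a zero-width lookahead.
import re
import sys

ENTRY_START = re.compile(r"(?=\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2},\d{3} )")

def close_timeline(text, marker="RS_CLOSE_REGION", proc="pid=66"):
    for entry in ENTRY_START.split(text):
        if marker in entry and proc in entry:
            ts = entry[:23]                         # e.g. "2024-12-10T14:26:14,752"
            msg = entry.split("] ", 1)[-1].strip()  # drop the thread bracket
            yield f"{ts}  {msg}"

if __name__ == "__main__":
    for row in close_timeline(sys.stdin.read()):
        print(row)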
2024-12-10T14:26:14,752 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(2837): Flushing 18673fce5a633353d821462d51dbbd4b 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-10T14:26:14,753 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=A 2024-12-10T14:26:14,753 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:14,753 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=B 2024-12-10T14:26:14,753 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:14,753 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 18673fce5a633353d821462d51dbbd4b, store=C 2024-12-10T14:26:14,753 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:14,758 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210152bd951e5e84d529ef5b999426b8244_18673fce5a633353d821462d51dbbd4b is 50, key is test_row_0/A:col10/1733840773824/Put/seqid=0 2024-12-10T14:26:14,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742075_1251 (size=12454) 2024-12-10T14:26:14,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T14:26:15,164 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:15,168 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210152bd951e5e84d529ef5b999426b8244_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210152bd951e5e84d529ef5b999426b8244_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:15,169 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/a0ef87c5f5b848f5be581db4e15ae17b, store: [table=TestAcidGuarantees family=A region=18673fce5a633353d821462d51dbbd4b] 2024-12-10T14:26:15,169 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/a0ef87c5f5b848f5be581db4e15ae17b is 175, key is test_row_0/A:col10/1733840773824/Put/seqid=0 2024-12-10T14:26:15,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742076_1252 (size=31255) 2024-12-10T14:26:15,574 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=448, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/a0ef87c5f5b848f5be581db4e15ae17b 2024-12-10T14:26:15,581 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/83d5010b181d478fb766ccac0fd47165 is 50, key is test_row_0/B:col10/1733840773824/Put/seqid=0 2024-12-10T14:26:15,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742077_1253 (size=12301) 2024-12-10T14:26:15,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T14:26:15,985 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/83d5010b181d478fb766ccac0fd47165 2024-12-10T14:26:15,992 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/cab06b0980b548ad96185b0740711431 is 50, key is test_row_0/C:col10/1733840773824/Put/seqid=0 2024-12-10T14:26:15,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742078_1254 (size=12301) 2024-12-10T14:26:16,177 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
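Each "BLOCK* addStoredBlock" entry above reports the size of an HDFS block written while the region flushes (e.g. blk_1073742075_1251 at 12454 bytes). As a rough, illustrative cross-check one can total those sizes from the dump; the script below is only a sketch over this log text, not part of the test harness.

#!/usr/bin/env python3
# Illustrative sketch only: total the HDFS block sizes reported by the
# "BLOCK* addStoredBlock" entries in this dump, a rough proxy for the bytes
# written by the flush and compactions. Block IDs and sizes come from the log.
import re
import sys

BLOCK = re.compile(r"addStoredBlock: \S+ is added to (blk_\d+_\d+) \(size=(\d+)\)")

def total_block_bytes(text):
    # de-duplicate on block ID in case the same block is reported by more
    # than one datanode
    sizes = {blk: int(size) for blk, size in BLOCK.findall(text)}
    return len(sizes), sum(sizes.values())

if __name__ == "__main__":
    n, total = total_block_bytes(sys.stdin.read())
    print(f"{n} block(s), {total} bytes (~{total / 1024:.1f} KiB)")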
2024-12-10T14:26:16,396 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/cab06b0980b548ad96185b0740711431 2024-12-10T14:26:16,400 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/A/a0ef87c5f5b848f5be581db4e15ae17b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/a0ef87c5f5b848f5be581db4e15ae17b 2024-12-10T14:26:16,403 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/a0ef87c5f5b848f5be581db4e15ae17b, entries=150, sequenceid=448, filesize=30.5 K 2024-12-10T14:26:16,404 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/B/83d5010b181d478fb766ccac0fd47165 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/83d5010b181d478fb766ccac0fd47165 2024-12-10T14:26:16,407 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/83d5010b181d478fb766ccac0fd47165, entries=150, sequenceid=448, filesize=12.0 K 2024-12-10T14:26:16,408 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/.tmp/C/cab06b0980b548ad96185b0740711431 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/cab06b0980b548ad96185b0740711431 2024-12-10T14:26:16,411 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/cab06b0980b548ad96185b0740711431, entries=150, sequenceid=448, filesize=12.0 K 2024-12-10T14:26:16,412 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 18673fce5a633353d821462d51dbbd4b in 1660ms, sequenceid=448, compaction requested=false 
2024-12-10T14:26:16,412 DEBUG [StoreCloser-TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1e07dce988f8442c8f98706064270301, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/932424f52f3b42a79127d0562621a038, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/0370378b760f40bc8676c07ff6657fd5, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/3c16834fe657460abef1ba5c594124da, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/2ffe952d943a4b9d9d95b002a7b7b657, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/9ce358d1ce61434699158006db928cff, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/81d3dcdaa8164ff7a72587e42c23654d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/d8e94630bfbc41a4b20b40d104141d58, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/9f27fd110777476eb3ea9491700138f2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/54f02e6b3c4642a58591303f667e8da0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/23587deb52f441299c81816952ef5c1d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/01487750a17040f3a2423e7d4e33f54c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/2020bbdf9e8e4874bb8b0deb66cf4cf6, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/982b9a627c644292999927898d459434, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/fa993a83c5754d36bc8afe664a60ff0d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/83760d295ee24efab581fc423959e7e3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/24a1be64548c46719a4c9822aa62e535, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b8e7fcf751b7490c9578a91dd358d608, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1b954cf6f6984d7aaa28977a60d4720a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/de51589484034130bf6ea748da1737cf, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/cb1ee0f725624bd2b09fe052f23bf141, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/89f52233ffa140f59734651e5dd36f3a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b100670681804c0e95632a29af50b418, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/6502997d43684d4191f48c8121dc9650, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1bf67c9762b64ebfbf106fa5430ac253, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b54f9927fe5140288645cac1d7e3f6cb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/5b16d39be4dc4b3a8ed8e5eaaad2ae7f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/7c9a954b02e2413f82aff0835b605537, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/3977b7a642794e35b5fa8a07d13c9525, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/e3f276d536bb419fbd8b56a0907cfc0d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/bca8694281f24f5e88ac8b5b40744877] to archive 2024-12-10T14:26:16,413 DEBUG [StoreCloser-TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
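What follows is the HFileArchiver moving the compacted store files of families A, B and C into the archive tree, one DEBUG entry per file. As an illustration only, the sketch below counts those moves per family and flags any destination that is not under the .../archive/... tree; it is a log-analysis aid, not HBase code, and the path layout it expects is exactly the one shown in these entries.

#!/usr/bin/env python3
# Illustrative sketch only: tally the HFileArchiver "Archived from
# FileableStoreFile" entries by column family and flag any destination that is
# not under the .../archive/... tree.
import re
import sys
from collections import Counter

ARCHIVED = re.compile(
    r"Archived from FileableStoreFile,\s+"
    r"(?P<src>hdfs://\S+?/(?P<family>[ABC])/\S+) to (?P<dst>hdfs://\S+)"
)

def archive_summary(text):
    counts, suspicious = Counter(), []
    for m in ARCHIVED.finditer(text):
        counts[m["family"]] += 1
        if "/archive/" not in m["dst"]:
            suspicious.append(m["dst"])
    return counts, suspicious

if __name__ == "__main__":
    counts, suspicious = archive_summary(sys.stdin.read())
    for family in sorted(counts):
        print(f"family {family}: {counts[family]} file(s) archived")
    if suspicious:
        print(f"{len(suspicious)} destination(s) not under the archive tree")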
2024-12-10T14:26:16,415 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/0370378b760f40bc8676c07ff6657fd5 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/0370378b760f40bc8676c07ff6657fd5 2024-12-10T14:26:16,415 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/3c16834fe657460abef1ba5c594124da to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/3c16834fe657460abef1ba5c594124da 2024-12-10T14:26:16,416 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/d8e94630bfbc41a4b20b40d104141d58 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/d8e94630bfbc41a4b20b40d104141d58 2024-12-10T14:26:16,416 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/2ffe952d943a4b9d9d95b002a7b7b657 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/2ffe952d943a4b9d9d95b002a7b7b657 2024-12-10T14:26:16,416 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/932424f52f3b42a79127d0562621a038 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/932424f52f3b42a79127d0562621a038 2024-12-10T14:26:16,416 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1e07dce988f8442c8f98706064270301 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1e07dce988f8442c8f98706064270301 2024-12-10T14:26:16,416 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/9ce358d1ce61434699158006db928cff to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/9ce358d1ce61434699158006db928cff 2024-12-10T14:26:16,417 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/81d3dcdaa8164ff7a72587e42c23654d to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/81d3dcdaa8164ff7a72587e42c23654d 2024-12-10T14:26:16,418 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/9f27fd110777476eb3ea9491700138f2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/9f27fd110777476eb3ea9491700138f2 2024-12-10T14:26:16,418 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/54f02e6b3c4642a58591303f667e8da0 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/54f02e6b3c4642a58591303f667e8da0 2024-12-10T14:26:16,418 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/01487750a17040f3a2423e7d4e33f54c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/01487750a17040f3a2423e7d4e33f54c 2024-12-10T14:26:16,419 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/23587deb52f441299c81816952ef5c1d to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/23587deb52f441299c81816952ef5c1d 2024-12-10T14:26:16,419 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/fa993a83c5754d36bc8afe664a60ff0d to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/fa993a83c5754d36bc8afe664a60ff0d 2024-12-10T14:26:16,419 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/2020bbdf9e8e4874bb8b0deb66cf4cf6 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/2020bbdf9e8e4874bb8b0deb66cf4cf6 2024-12-10T14:26:16,419 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/982b9a627c644292999927898d459434 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/982b9a627c644292999927898d459434 2024-12-10T14:26:16,420 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/83760d295ee24efab581fc423959e7e3 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/83760d295ee24efab581fc423959e7e3 2024-12-10T14:26:16,421 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/24a1be64548c46719a4c9822aa62e535 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/24a1be64548c46719a4c9822aa62e535 2024-12-10T14:26:16,421 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1b954cf6f6984d7aaa28977a60d4720a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1b954cf6f6984d7aaa28977a60d4720a 2024-12-10T14:26:16,421 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b8e7fcf751b7490c9578a91dd358d608 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b8e7fcf751b7490c9578a91dd358d608 2024-12-10T14:26:16,422 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/de51589484034130bf6ea748da1737cf to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/de51589484034130bf6ea748da1737cf 2024-12-10T14:26:16,422 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/89f52233ffa140f59734651e5dd36f3a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/89f52233ffa140f59734651e5dd36f3a 2024-12-10T14:26:16,422 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b100670681804c0e95632a29af50b418 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b100670681804c0e95632a29af50b418 2024-12-10T14:26:16,422 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/cb1ee0f725624bd2b09fe052f23bf141 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/cb1ee0f725624bd2b09fe052f23bf141 2024-12-10T14:26:16,423 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/6502997d43684d4191f48c8121dc9650 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/6502997d43684d4191f48c8121dc9650 2024-12-10T14:26:16,423 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b54f9927fe5140288645cac1d7e3f6cb to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/b54f9927fe5140288645cac1d7e3f6cb 2024-12-10T14:26:16,423 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/5b16d39be4dc4b3a8ed8e5eaaad2ae7f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/5b16d39be4dc4b3a8ed8e5eaaad2ae7f 2024-12-10T14:26:16,424 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/7c9a954b02e2413f82aff0835b605537 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/7c9a954b02e2413f82aff0835b605537 2024-12-10T14:26:16,424 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1bf67c9762b64ebfbf106fa5430ac253 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/1bf67c9762b64ebfbf106fa5430ac253 2024-12-10T14:26:16,424 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/bca8694281f24f5e88ac8b5b40744877 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/bca8694281f24f5e88ac8b5b40744877 2024-12-10T14:26:16,424 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/e3f276d536bb419fbd8b56a0907cfc0d to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/e3f276d536bb419fbd8b56a0907cfc0d 2024-12-10T14:26:16,425 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/3977b7a642794e35b5fa8a07d13c9525 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/3977b7a642794e35b5fa8a07d13c9525 2024-12-10T14:26:16,426 DEBUG [StoreCloser-TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/c6c5da171a7b440090b68a43ef370e90, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/174897c4b9a54632acc5b63065f14b33, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/9155bc4f449c4abc82ce418e38ac070f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3c308ff6930e450894096af79085f656, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/5344d2d423a943eb96499057ece0b868, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/345376465be349be96bc11299aa1b7a1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/6dc60c15968741c79b532f86d22cd02b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/6f46bfe626744f9a87ab4b3e7106a326, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e76f11b57ded4593be90c9a0b7a37f54, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e97bd19a261447c69b73b0a8b97aff93, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/b2e7c7536b2f439897886ff73c5164d4, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/146748eb451441208095649b0b88ede0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/d49aafad3a714d769a6ccdce7342eddf, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/80e1158b18294667b6bbab06a71e0502, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dbb1303755634816a28660c30f6380cd, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/5463973440a147cc931b572f82d81ad3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/ba3b7ec1876a4098998381d3154198cc, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/10b8b37702bf43a99691f43950596527, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e794136822754eca91cc778d469e8822, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dab6624a318a4e16ab450beef69a21fd, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/43361b7c33184ad7b1d062b3de94d601, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/a145d8ba2cb8410a9020d50f94dd82dc, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/af67abc5bae74ead85b5b230924575f7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/06031c52223e46ea87203a1f8c9e2ceb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/b46bb416e8154d9e944de5503e083ba1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3ae8ee3d39c64c1cb203859f507a92d0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3696ef225ec546429baaaffe26b9feff, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/ab7dae0cd21d48f493d39cd1175d9f18, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/324e4ba8d9f941759d1aaa55048b4c31, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/d5474008da9c479cad2a497422180f3c, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dfb2754774204fe6a8ec8a7750e92a24] to archive 2024-12-10T14:26:16,427 DEBUG [StoreCloser-TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T14:26:16,429 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/c6c5da171a7b440090b68a43ef370e90 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/c6c5da171a7b440090b68a43ef370e90 2024-12-10T14:26:16,429 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3c308ff6930e450894096af79085f656 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3c308ff6930e450894096af79085f656 2024-12-10T14:26:16,430 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/9155bc4f449c4abc82ce418e38ac070f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/9155bc4f449c4abc82ce418e38ac070f 2024-12-10T14:26:16,430 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/345376465be349be96bc11299aa1b7a1 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/345376465be349be96bc11299aa1b7a1 2024-12-10T14:26:16,431 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/174897c4b9a54632acc5b63065f14b33 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/174897c4b9a54632acc5b63065f14b33 2024-12-10T14:26:16,431 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/6dc60c15968741c79b532f86d22cd02b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/6dc60c15968741c79b532f86d22cd02b 2024-12-10T14:26:16,431 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/6f46bfe626744f9a87ab4b3e7106a326 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/6f46bfe626744f9a87ab4b3e7106a326 2024-12-10T14:26:16,431 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/5344d2d423a943eb96499057ece0b868 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/5344d2d423a943eb96499057ece0b868 2024-12-10T14:26:16,432 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e76f11b57ded4593be90c9a0b7a37f54 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e76f11b57ded4593be90c9a0b7a37f54 2024-12-10T14:26:16,432 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e97bd19a261447c69b73b0a8b97aff93 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e97bd19a261447c69b73b0a8b97aff93 2024-12-10T14:26:16,433 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/146748eb451441208095649b0b88ede0 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/146748eb451441208095649b0b88ede0 2024-12-10T14:26:16,433 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/d49aafad3a714d769a6ccdce7342eddf to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/d49aafad3a714d769a6ccdce7342eddf 2024-12-10T14:26:16,433 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/b2e7c7536b2f439897886ff73c5164d4 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/b2e7c7536b2f439897886ff73c5164d4 2024-12-10T14:26:16,434 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/80e1158b18294667b6bbab06a71e0502 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/80e1158b18294667b6bbab06a71e0502 2024-12-10T14:26:16,434 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/5463973440a147cc931b572f82d81ad3 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/5463973440a147cc931b572f82d81ad3 2024-12-10T14:26:16,434 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dbb1303755634816a28660c30f6380cd to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dbb1303755634816a28660c30f6380cd 2024-12-10T14:26:16,435 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/10b8b37702bf43a99691f43950596527 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/10b8b37702bf43a99691f43950596527 2024-12-10T14:26:16,435 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/ba3b7ec1876a4098998381d3154198cc to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/ba3b7ec1876a4098998381d3154198cc 2024-12-10T14:26:16,436 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e794136822754eca91cc778d469e8822 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/e794136822754eca91cc778d469e8822 2024-12-10T14:26:16,436 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dab6624a318a4e16ab450beef69a21fd to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dab6624a318a4e16ab450beef69a21fd 2024-12-10T14:26:16,437 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/43361b7c33184ad7b1d062b3de94d601 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/43361b7c33184ad7b1d062b3de94d601 2024-12-10T14:26:16,437 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/af67abc5bae74ead85b5b230924575f7 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/af67abc5bae74ead85b5b230924575f7 2024-12-10T14:26:16,437 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/a145d8ba2cb8410a9020d50f94dd82dc to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/a145d8ba2cb8410a9020d50f94dd82dc 2024-12-10T14:26:16,438 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3ae8ee3d39c64c1cb203859f507a92d0 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3ae8ee3d39c64c1cb203859f507a92d0 2024-12-10T14:26:16,438 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/06031c52223e46ea87203a1f8c9e2ceb to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/06031c52223e46ea87203a1f8c9e2ceb 2024-12-10T14:26:16,438 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/b46bb416e8154d9e944de5503e083ba1 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/b46bb416e8154d9e944de5503e083ba1 2024-12-10T14:26:16,439 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3696ef225ec546429baaaffe26b9feff to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/3696ef225ec546429baaaffe26b9feff 2024-12-10T14:26:16,439 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/ab7dae0cd21d48f493d39cd1175d9f18 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/ab7dae0cd21d48f493d39cd1175d9f18 2024-12-10T14:26:16,440 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/324e4ba8d9f941759d1aaa55048b4c31 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/324e4ba8d9f941759d1aaa55048b4c31 2024-12-10T14:26:16,440 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/d5474008da9c479cad2a497422180f3c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/d5474008da9c479cad2a497422180f3c 2024-12-10T14:26:16,440 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dfb2754774204fe6a8ec8a7750e92a24 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/dfb2754774204fe6a8ec8a7750e92a24 2024-12-10T14:26:16,441 DEBUG [StoreCloser-TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/95c85aa3894449968fd51cf01876f6e7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/a66147102d584e94b936c06f5834891b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/6fb78580c4794ad5a1c85cf9b026ca70, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/46bf21c9a7aa44799ad8572464bbe367, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/e29e56c717c1471696a385b9fb99c272, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/355d75d27aed4df0a15a1549eb5a5e2c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/9f0e4ecb86594f12aa60a3a6cf342889, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/05338e4ed37b40c4ae95aeae566c6552, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/da817e77110a4b1493f4ba405e56a453, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/fb088ab4181c4d8fb87e2cca94406787, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/690c2e2297bf4b65a85957e1b95dde09, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/66ded0a0026849c98fa957305a022b1d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/8a405f8c3e3541ccbd34bd12c556931f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/762addc4abb6485e999c0f913060db5c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/cc411183c9a64116b1aa9b0809a30ee6, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/7634b2eaae2e444fa2672976361b8645, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/8099ba84afdd4104a85f0243df59bd4f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/961b667a0da44340ba0fc889af46505e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/1b78a1cf0a5845ac916a037ddfb29df2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/19344c7bfb8b4636a96437f0e38daec3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/618c96041a6b45a8a81c1bb45eb1581b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/e631c73780bb4263a7d496596871fdf0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/fce3b8cd53a44c84a954fe2b66c3603a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/50e7c334eb994fc78e627d1c49b08930, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/35e42408935e48519cf0e4215f1adf18, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/34659317362947308c7960ce981a239e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/519566d4322141f7856c962ddee47dd8, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/26293a511ba44a799a32962fc448a6e1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/292cb55d988647028e40b9e493d7bd86, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/873620a8ea1247c1a8e9cb193b0f1f5b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/01ffe5bba7fa450f8c057f2130a0194a] to archive 2024-12-10T14:26:16,442 DEBUG [StoreCloser-TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T14:26:16,445 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/95c85aa3894449968fd51cf01876f6e7 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/95c85aa3894449968fd51cf01876f6e7 2024-12-10T14:26:16,446 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/a66147102d584e94b936c06f5834891b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/a66147102d584e94b936c06f5834891b 2024-12-10T14:26:16,446 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/e29e56c717c1471696a385b9fb99c272 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/e29e56c717c1471696a385b9fb99c272 2024-12-10T14:26:16,446 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/355d75d27aed4df0a15a1549eb5a5e2c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/355d75d27aed4df0a15a1549eb5a5e2c 2024-12-10T14:26:16,446 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/05338e4ed37b40c4ae95aeae566c6552 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/05338e4ed37b40c4ae95aeae566c6552 2024-12-10T14:26:16,446 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/46bf21c9a7aa44799ad8572464bbe367 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/46bf21c9a7aa44799ad8572464bbe367 2024-12-10T14:26:16,446 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/9f0e4ecb86594f12aa60a3a6cf342889 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/9f0e4ecb86594f12aa60a3a6cf342889 2024-12-10T14:26:16,446 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/6fb78580c4794ad5a1c85cf9b026ca70 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/6fb78580c4794ad5a1c85cf9b026ca70 2024-12-10T14:26:16,448 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/fb088ab4181c4d8fb87e2cca94406787 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/fb088ab4181c4d8fb87e2cca94406787 2024-12-10T14:26:16,448 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/da817e77110a4b1493f4ba405e56a453 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/da817e77110a4b1493f4ba405e56a453 2024-12-10T14:26:16,449 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/66ded0a0026849c98fa957305a022b1d to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/66ded0a0026849c98fa957305a022b1d 2024-12-10T14:26:16,449 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/cc411183c9a64116b1aa9b0809a30ee6 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/cc411183c9a64116b1aa9b0809a30ee6 2024-12-10T14:26:16,449 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/690c2e2297bf4b65a85957e1b95dde09 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/690c2e2297bf4b65a85957e1b95dde09 2024-12-10T14:26:16,449 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/762addc4abb6485e999c0f913060db5c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/762addc4abb6485e999c0f913060db5c 2024-12-10T14:26:16,449 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/8a405f8c3e3541ccbd34bd12c556931f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/8a405f8c3e3541ccbd34bd12c556931f 2024-12-10T14:26:16,450 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/7634b2eaae2e444fa2672976361b8645 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/7634b2eaae2e444fa2672976361b8645 2024-12-10T14:26:16,451 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/961b667a0da44340ba0fc889af46505e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/961b667a0da44340ba0fc889af46505e 2024-12-10T14:26:16,451 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/8099ba84afdd4104a85f0243df59bd4f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/8099ba84afdd4104a85f0243df59bd4f 2024-12-10T14:26:16,452 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/19344c7bfb8b4636a96437f0e38daec3 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/19344c7bfb8b4636a96437f0e38daec3 2024-12-10T14:26:16,452 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/1b78a1cf0a5845ac916a037ddfb29df2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/1b78a1cf0a5845ac916a037ddfb29df2 2024-12-10T14:26:16,452 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/618c96041a6b45a8a81c1bb45eb1581b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/618c96041a6b45a8a81c1bb45eb1581b 2024-12-10T14:26:16,452 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/fce3b8cd53a44c84a954fe2b66c3603a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/fce3b8cd53a44c84a954fe2b66c3603a 2024-12-10T14:26:16,452 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/e631c73780bb4263a7d496596871fdf0 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/e631c73780bb4263a7d496596871fdf0 2024-12-10T14:26:16,453 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/50e7c334eb994fc78e627d1c49b08930 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/50e7c334eb994fc78e627d1c49b08930 2024-12-10T14:26:16,454 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/35e42408935e48519cf0e4215f1adf18 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/35e42408935e48519cf0e4215f1adf18 2024-12-10T14:26:16,454 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/34659317362947308c7960ce981a239e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/34659317362947308c7960ce981a239e 2024-12-10T14:26:16,455 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/292cb55d988647028e40b9e493d7bd86 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/292cb55d988647028e40b9e493d7bd86 2024-12-10T14:26:16,455 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/01ffe5bba7fa450f8c057f2130a0194a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/01ffe5bba7fa450f8c057f2130a0194a 2024-12-10T14:26:16,455 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/26293a511ba44a799a32962fc448a6e1 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/26293a511ba44a799a32962fc448a6e1 2024-12-10T14:26:16,455 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/519566d4322141f7856c962ddee47dd8 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/519566d4322141f7856c962ddee47dd8 2024-12-10T14:26:16,455 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/873620a8ea1247c1a8e9cb193b0f1f5b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/873620a8ea1247c1a8e9cb193b0f1f5b 2024-12-10T14:26:16,460 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/recovered.edits/451.seqid, newMaxSeqId=451, maxSeqId=4 2024-12-10T14:26:16,461 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b. 
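The archiving activity above follows a simple path convention: a compacted store file under data/<namespace>/<table>/<region>/<family>/ is moved to the same relative location under archive/data/... beneath the cluster root. A minimal sketch of that mapping in plain Java (illustration only, not HBase source; the root directory and file name below are the ones visible in this log):

public class ArchivePathSketch {
  // Assumed helper: rootDir is the cluster root seen in this log; storeFile must live under it.
  static String toArchivePath(String rootDir, String storeFile) {
    String relative = storeFile.substring(rootDir.length() + 1); // "data/<ns>/<table>/<region>/<cf>/<file>"
    return rootDir + "/archive/" + relative;
  }

  public static void main(String[] args) {
    String root = "hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da";
    String file = root + "/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/ab7dae0cd21d48f493d39cd1175d9f18";
    // Prints the matching archive location, i.e. .../archive/data/default/TestAcidGuarantees/.../B/ab7dae...
    System.out.println(toArchivePath(root, file));
  }
}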
2024-12-10T14:26:16,461 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1635): Region close journal for 18673fce5a633353d821462d51dbbd4b: 2024-12-10T14:26:16,463 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(170): Closed 18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:16,463 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=18673fce5a633353d821462d51dbbd4b, regionState=CLOSED 2024-12-10T14:26:16,466 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-10T14:26:16,466 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseRegionProcedure 18673fce5a633353d821462d51dbbd4b, server=db1d50717577,46699,1733840717757 in 2.6230 sec 2024-12-10T14:26:16,467 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-12-10T14:26:16,467 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=18673fce5a633353d821462d51dbbd4b, UNASSIGN in 2.6270 sec 2024-12-10T14:26:16,469 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-10T14:26:16,469 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.6300 sec 2024-12-10T14:26:16,470 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840776470"}]},"ts":"1733840776470"} 2024-12-10T14:26:16,471 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T14:26:16,473 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-10T14:26:16,474 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.6420 sec 2024-12-10T14:26:17,525 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-10T14:26:17,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T14:26:17,939 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-10T14:26:17,940 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T14:26:17,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:26:17,941 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:26:17,942 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-10T14:26:17,942 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=67, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:26:17,943 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,945 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/recovered.edits] 2024-12-10T14:26:17,949 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/e7e84dbba52c4560acc6125ef8285280 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/e7e84dbba52c4560acc6125ef8285280 2024-12-10T14:26:17,949 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/a0ef87c5f5b848f5be581db4e15ae17b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/A/a0ef87c5f5b848f5be581db4e15ae17b 2024-12-10T14:26:17,951 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/da3606be43454f46acdc4219fe9690f8 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/da3606be43454f46acdc4219fe9690f8 2024-12-10T14:26:17,952 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/83d5010b181d478fb766ccac0fd47165 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/B/83d5010b181d478fb766ccac0fd47165 2024-12-10T14:26:17,954 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/4accb6d59b31446fb708897d78b5479f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/4accb6d59b31446fb708897d78b5479f 2024-12-10T14:26:17,954 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/cab06b0980b548ad96185b0740711431 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/C/cab06b0980b548ad96185b0740711431 2024-12-10T14:26:17,957 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/recovered.edits/451.seqid to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b/recovered.edits/451.seqid 2024-12-10T14:26:17,958 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,958 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T14:26:17,958 DEBUG [PEWorker-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T14:26:17,959 DEBUG [PEWorker-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-10T14:26:17,968 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121009ab564882e44dab93ea9b671f24d99b_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121009ab564882e44dab93ea9b671f24d99b_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,968 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101f459b15b4924529ba7f54091bdbd106_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101f459b15b4924529ba7f54091bdbd106_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,968 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102ed6c7d22e1e4f2a9dc34647961c5a4a_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102ed6c7d22e1e4f2a9dc34647961c5a4a_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,968 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210152bd951e5e84d529ef5b999426b8244_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210152bd951e5e84d529ef5b999426b8244_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,968 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102142d5bf92f64355b0bd9bfcd3b43455_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102142d5bf92f64355b0bd9bfcd3b43455_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,968 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102d6086cd95cd4961842c5eb04035c8c5_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102d6086cd95cd4961842c5eb04035c8c5_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,968 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121041c8c2ad5c3e429fa28003e9c147bbc9_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121041c8c2ad5c3e429fa28003e9c147bbc9_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,969 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412103ccc7f4b579e4385b0b3ad00ce59abc0_18673fce5a633353d821462d51dbbd4b to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412103ccc7f4b579e4385b0b3ad00ce59abc0_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,970 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121068db3a1e4b994065ac04f1321c5b0029_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121068db3a1e4b994065ac04f1321c5b0029_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,970 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412107fb25ca611984037a7aa8cdb2c9f2ccd_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412107fb25ca611984037a7aa8cdb2c9f2ccd_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,970 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412107c057feb5dbf45178463f350b45be6dd_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412107c057feb5dbf45178463f350b45be6dd_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,970 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108c99c3b1d6a2494fa5b71c11bd1d5b4c_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108c99c3b1d6a2494fa5b71c11bd1d5b4c_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,970 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108abe9fc376cd4285b194b87d04a18db4_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108abe9fc376cd4285b194b87d04a18db4_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,971 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210945e1e4d8fff4d02896e77e39ac15226_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210945e1e4d8fff4d02896e77e39ac15226_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,971 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108f0aa1cd986a4df2a15822f6f5c378bf_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108f0aa1cd986a4df2a15822f6f5c378bf_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,971 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412109d0125ea33664f66838dec3ba9041d97_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412109d0125ea33664f66838dec3ba9041d97_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,972 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412109f0404be05f94d018bd35ae9394ba3c9_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412109f0404be05f94d018bd35ae9394ba3c9_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,972 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c40ff8ec329848139de47889c755f0c0_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c40ff8ec329848139de47889c755f0c0_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,972 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d6f7f7b204974020b8fe0183ef675a20_18673fce5a633353d821462d51dbbd4b to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d6f7f7b204974020b8fe0183ef675a20_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,972 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210df48c3e303a943a9a75e3910a3683bd8_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210df48c3e303a943a9a75e3910a3683bd8_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,972 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fddab759ba2a440b995ae9efb474a970_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fddab759ba2a440b995ae9efb474a970_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,973 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fa7d1ac8823e4b6b990d3df35769b4dd_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fa7d1ac8823e4b6b990d3df35769b4dd_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,973 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f8fcbe3d59c54e3bb32d5a00370c8f04_18673fce5a633353d821462d51dbbd4b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f8fcbe3d59c54e3bb32d5a00370c8f04_18673fce5a633353d821462d51dbbd4b 2024-12-10T14:26:17,973 DEBUG [PEWorker-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T14:26:17,975 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=67, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:26:17,978 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T14:26:17,980 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
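The DISABLE (pid=63) and DELETE (pid=67) procedures driving this stretch of the log are what the standard HBase Admin client calls trigger on the master. A minimal client-side sketch, assuming a default client Configuration (connection details are placeholders, not taken from the test source):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table); // regions are unassigned and closed (DisableTableProcedure)
      }
      admin.deleteTable(table);    // FS layout archived and META rows removed (DeleteTableProcedure)
    }
  }
}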
2024-12-10T14:26:17,980 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=67, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:26:17,980 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-10T14:26:17,981 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733840777980"}]},"ts":"9223372036854775807"} 2024-12-10T14:26:17,982 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T14:26:17,982 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 18673fce5a633353d821462d51dbbd4b, NAME => 'TestAcidGuarantees,,1733840748750.18673fce5a633353d821462d51dbbd4b.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T14:26:17,982 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-10T14:26:17,982 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733840777982"}]},"ts":"9223372036854775807"} 2024-12-10T14:26:17,984 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T14:26:17,988 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=67, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:26:17,988 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 48 msec 2024-12-10T14:26:18,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-10T14:26:18,042 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-10T14:26:18,052 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=248 (was 247) Potentially hanging thread: hconnection-0x540ea891-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/cluster_f4b4a87d-946f-3b61-084c-969bf580dcf8/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1890355422_22 at /127.0.0.1:34954 [Waiting for operation #917] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x540ea891-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x540ea891-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1890355422_22 at /127.0.0.1:40510 [Waiting for operation #848] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x540ea891-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_105012982_22 at /127.0.0.1:51054 [Waiting for operation #440] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/cluster_f4b4a87d-946f-3b61-084c-969bf580dcf8/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_105012982_22 at /127.0.0.1:51108 [Waiting for operation #448] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=462 (was 461) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=261 (was 222) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2387 (was 2550) 2024-12-10T14:26:18,060 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=248, OpenFileDescriptor=462, MaxFileDescriptor=1048576, SystemLoadAverage=261, ProcessCount=11, AvailableMemoryMB=2387 2024-12-10T14:26:18,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
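The ResourceChecker block above compares thread, file-descriptor, and load-average counts before and after each test and flags growth with the "- Thread LEAK? -" marker. A minimal before/after snapshot in the same spirit, using only standard JMX beans (illustration only, not the HBase ResourceChecker implementation):

import java.lang.management.ManagementFactory;

public class ResourceSnapshotSketch {
  public static void main(String[] args) {
    int threadsBefore = ManagementFactory.getThreadMXBean().getThreadCount();
    double loadBefore = ManagementFactory.getOperatingSystemMXBean().getSystemLoadAverage();

    // ... the test body would run here ...

    int threadsAfter = ManagementFactory.getThreadMXBean().getThreadCount();
    double loadAfter = ManagementFactory.getOperatingSystemMXBean().getSystemLoadAverage();
    System.out.printf("Thread=%d (was %d)%n", threadsAfter, threadsBefore);
    if (threadsAfter > threadsBefore) {
      System.out.println("- Thread LEAK? -"); // same marker the ResourceChecker prints above
    }
    System.out.printf("SystemLoadAverage=%.0f (was %.0f)%n", loadAfter, loadBefore);
  }
}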
2024-12-10T14:26:18,062 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:26:18,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T14:26:18,063 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:26:18,063 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:18,063 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 68 2024-12-10T14:26:18,064 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:26:18,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-10T14:26:18,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742079_1255 (size=963) 2024-12-10T14:26:18,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-10T14:26:18,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-10T14:26:18,471 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da 2024-12-10T14:26:18,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742080_1256 (size=53) 2024-12-10T14:26:18,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-10T14:26:18,878 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:26:18,878 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 718b511ced67f3365dbb07f1afd9efaa, disabling compactions & flushes 2024-12-10T14:26:18,878 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:18,878 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:18,878 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. after waiting 0 ms 2024-12-10T14:26:18,878 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:18,878 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
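
[Editor's note] The shell-style create request logged by HMaster above can also be issued through the Java Admin API. The sketch below is a rough, hypothetical equivalent, not the test's actual code: the builder chain is an assumption, but the table-level ADAPTIVE in-memory compaction attribute, the deliberately tiny 131072-byte MEMSTORE_FLUSHSIZE that TableDescriptorChecker warned about, and the three single-version families A, B and C are all taken from the descriptor dumped in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuaranteesTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level metadata seen in the logged create request.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
              // 131072 bytes: the small flush size the TableDescriptorChecker warns about.
              .setMemStoreFlushSize(131072L);
      // Three column families, one version each, matching the dumped descriptor.
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)
                .build());
      }
      admin.createTable(table.build());
    }
  }
}

On the master, such a request becomes the pid=68 CreateTableProcedure whose state transitions (CREATE_TABLE_PRE_OPERATION, CREATE_TABLE_WRITE_FS_LAYOUT, CREATE_TABLE_ADD_TO_META, CREATE_TABLE_ASSIGN_REGIONS, ...) appear in the surrounding entries.
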
2024-12-10T14:26:18,878 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:18,879 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:26:18,879 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733840778879"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733840778879"}]},"ts":"1733840778879"} 2024-12-10T14:26:18,880 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T14:26:18,881 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:26:18,881 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840778881"}]},"ts":"1733840778881"} 2024-12-10T14:26:18,882 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T14:26:18,885 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=718b511ced67f3365dbb07f1afd9efaa, ASSIGN}] 2024-12-10T14:26:18,886 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=718b511ced67f3365dbb07f1afd9efaa, ASSIGN 2024-12-10T14:26:18,886 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=718b511ced67f3365dbb07f1afd9efaa, ASSIGN; state=OFFLINE, location=db1d50717577,46699,1733840717757; forceNewPlan=false, retain=false 2024-12-10T14:26:19,037 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=718b511ced67f3365dbb07f1afd9efaa, regionState=OPENING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:26:19,038 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; OpenRegionProcedure 718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:26:19,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-10T14:26:19,190 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:19,192 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
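
[Editor's note] While the master runs the assignment subprocedures (pid=69/70), the client side keeps polling "Checking to see if procedure is done pid=68" until the create completes. A blocking Admin.createTable already performs that wait internally; if a caller needs to wait on its own, a minimal sketch could look like the following (class name, fixed 100 ms interval and timeout handling are illustrative assumptions, not what the test or the client library actually do):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class TableWait {
  /** Polls until the table exists and all its regions are available, or the deadline passes. */
  public static boolean waitForTable(Admin admin, String name, long timeoutMs)
      throws Exception {
    TableName tableName = TableName.valueOf(name);
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (admin.tableExists(tableName) && admin.isTableAvailable(tableName)) {
        return true;
      }
      // Crude fixed-interval analogue of the client's "is procedure done" polling,
      // which in the log backs off from roughly 100 ms up to about a second.
      Thread.sleep(100);
    }
    return false;
  }
}
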
2024-12-10T14:26:19,192 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7285): Opening region: {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:26:19,193 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:19,193 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:26:19,193 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7327): checking encryption for 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:19,193 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7330): checking classloading for 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:19,194 INFO [StoreOpener-718b511ced67f3365dbb07f1afd9efaa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:19,195 INFO [StoreOpener-718b511ced67f3365dbb07f1afd9efaa-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:26:19,195 INFO [StoreOpener-718b511ced67f3365dbb07f1afd9efaa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 718b511ced67f3365dbb07f1afd9efaa columnFamilyName A 2024-12-10T14:26:19,195 DEBUG [StoreOpener-718b511ced67f3365dbb07f1afd9efaa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:19,196 INFO [StoreOpener-718b511ced67f3365dbb07f1afd9efaa-1 {}] regionserver.HStore(327): Store=718b511ced67f3365dbb07f1afd9efaa/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:26:19,196 INFO [StoreOpener-718b511ced67f3365dbb07f1afd9efaa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:19,197 INFO [StoreOpener-718b511ced67f3365dbb07f1afd9efaa-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:26:19,197 INFO [StoreOpener-718b511ced67f3365dbb07f1afd9efaa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 718b511ced67f3365dbb07f1afd9efaa columnFamilyName B 2024-12-10T14:26:19,197 DEBUG [StoreOpener-718b511ced67f3365dbb07f1afd9efaa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:19,197 INFO [StoreOpener-718b511ced67f3365dbb07f1afd9efaa-1 {}] regionserver.HStore(327): Store=718b511ced67f3365dbb07f1afd9efaa/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:26:19,198 INFO [StoreOpener-718b511ced67f3365dbb07f1afd9efaa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:19,198 INFO [StoreOpener-718b511ced67f3365dbb07f1afd9efaa-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:26:19,199 INFO [StoreOpener-718b511ced67f3365dbb07f1afd9efaa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 718b511ced67f3365dbb07f1afd9efaa columnFamilyName C 2024-12-10T14:26:19,199 DEBUG [StoreOpener-718b511ced67f3365dbb07f1afd9efaa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:19,199 INFO [StoreOpener-718b511ced67f3365dbb07f1afd9efaa-1 {}] regionserver.HStore(327): Store=718b511ced67f3365dbb07f1afd9efaa/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:26:19,199 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:19,200 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:19,200 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:19,202 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T14:26:19,203 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1085): writing seq id for 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:19,205 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:26:19,205 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1102): Opened 718b511ced67f3365dbb07f1afd9efaa; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70607526, jitterRate=0.05213412642478943}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T14:26:19,206 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1001): Region open journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:19,207 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., pid=70, masterSystemTime=1733840779189 2024-12-10T14:26:19,208 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:19,208 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
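
[Editor's note] The StoreOpener entries above show each family coming up as a CompactingMemStore with an ADAPTIVE compactor, and FlushLargeStoresPolicy falling back to a derived lower bound because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the descriptor. The sketch below only names the keys that appear verbatim in the log; the values are examples rather than recommendations, and whether a given key is honored from the site configuration or only as table/column-family metadata varies (the last one is looked up in the table descriptor here), so treat this as a naming reference, not a tuning recipe:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreTuningSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Same key the table descriptor carries as METADATA in the logged create request.
    conf.set("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    // Region-level flush trigger; 131072 bytes is the test's deliberately tiny value.
    conf.setLong("hbase.hregion.memstore.flush.size", 131072L);
    // Per-column-family flush lower bound; when absent from the descriptor, the log shows
    // FlushLargeStoresPolicy using region.getMemStoreFlushHeapSize / number of families
    // instead (16.0 M in this run).
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);
    return conf;
  }
}
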
2024-12-10T14:26:19,208 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=718b511ced67f3365dbb07f1afd9efaa, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:26:19,210 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-10T14:26:19,210 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; OpenRegionProcedure 718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 in 171 msec 2024-12-10T14:26:19,212 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-12-10T14:26:19,212 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=718b511ced67f3365dbb07f1afd9efaa, ASSIGN in 325 msec 2024-12-10T14:26:19,212 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:26:19,213 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840779212"}]},"ts":"1733840779212"} 2024-12-10T14:26:19,213 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T14:26:19,215 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:26:19,216 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1530 sec 2024-12-10T14:26:20,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-10T14:26:20,168 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 68 completed 2024-12-10T14:26:20,169 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x44645c55 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@669e1999 2024-12-10T14:26:20,173 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6862e3ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:20,175 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:20,176 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57424, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:20,177 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T14:26:20,178 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36854, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T14:26:20,180 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64ee0130 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72aa9ee5 2024-12-10T14:26:20,182 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d296fed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:20,183 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683b64c3 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ec09297 2024-12-10T14:26:20,186 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8d0caa5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:20,187 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x07e55eb7 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4dfb20f6 2024-12-10T14:26:20,189 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43f04e0e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:20,190 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x03a703d2 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17cf7fc0 2024-12-10T14:26:20,194 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@560ec309, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:20,195 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14ed1e44 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78b04266 2024-12-10T14:26:20,197 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5886c0f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:20,198 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x088aa519 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@66e575aa 2024-12-10T14:26:20,201 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a0e9c8f, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:20,201 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e998dd3 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@131ceb8f 2024-12-10T14:26:20,204 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d68f787, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:20,205 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e4c79b8 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a78bf6d 2024-12-10T14:26:20,208 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10e6bf6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:20,209 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d1403c3 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@328852db 2024-12-10T14:26:20,211 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1730a60f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:20,212 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3bf0ba59 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4b9e2976 2024-12-10T14:26:20,214 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@598cfed4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:20,217 DEBUG [hconnection-0x5aba5d0a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:20,217 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:26:20,217 DEBUG [hconnection-0xeaef610-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:20,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-10T14:26:20,218 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57436, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
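
[Editor's note] The "flush TestAcidGuarantees" request logged by HMaster$22 above arrives over MasterService and is stored as FlushTableProcedure pid=71. From a client, such a table flush is typically requested through the Admin API; a minimal sketch, with connection setup assumed rather than taken from the test:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush the table; in the log this materializes as a
      // FlushTableProcedure (pid=71) with a per-region FlushRegionProcedure child (pid=72).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

As the later entries show, the region server declines the remote flush ("NOT flushing ... as already flushing") because a size-triggered flush is already in progress, and reports the failure back to the master for pid=72.
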
2024-12-10T14:26:20,218 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57448, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:20,219 DEBUG [hconnection-0x2fa6ce6a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:20,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T14:26:20,219 DEBUG [hconnection-0x4a1dde49-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:20,219 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:26:20,219 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57464, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:20,220 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57472, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:20,220 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:26:20,220 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:26:20,221 DEBUG [hconnection-0x54710e53-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:20,221 DEBUG [hconnection-0x71eee384-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:20,221 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57480, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:20,222 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57482, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:20,222 DEBUG [hconnection-0x752d985f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:20,223 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57486, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:20,223 DEBUG [hconnection-0x786575f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:20,224 DEBUG [hconnection-0x3cd2356b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:20,224 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57500, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-10T14:26:20,224 DEBUG [hconnection-0x6a043d24-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:20,225 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57516, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:20,226 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57528, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:20,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:20,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T14:26:20,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:20,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:20,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:20,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:20,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:20,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:20,261 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/c62ae1ce59704076b6808ae414f3bb0d is 50, key is test_row_0/A:col10/1733840780229/Put/seqid=0 2024-12-10T14:26:20,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840840255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840840256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840840257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57500 deadline: 1733840840261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840840262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742081_1257 (size=12001) 2024-12-10T14:26:20,280 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/c62ae1ce59704076b6808ae414f3bb0d 2024-12-10T14:26:20,307 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/7bad3117ece74a5a9fa57a3660620925 is 50, key is test_row_0/B:col10/1733840780229/Put/seqid=0 2024-12-10T14:26:20,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T14:26:20,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742082_1258 (size=12001) 2024-12-10T14:26:20,323 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/7bad3117ece74a5a9fa57a3660620925 2024-12-10T14:26:20,362 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/48a36015801a448e86b56e63a1dff9f8 is 50, key is test_row_0/C:col10/1733840780229/Put/seqid=0 2024-12-10T14:26:20,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840840365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840840365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840840365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57500 deadline: 1733840840369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840840369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,374 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:20,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-10T14:26:20,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:20,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:20,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:20,374 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
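
[Editor's note] While that flush is in flight, the put handlers above keep rejecting mutations with RegionTooBusyException ("Over memstore limit=512.0 K"), which the HBase client normally treats as retryable inside its own retry machinery. The hand-rolled retry below is purely illustrative (the row key, family and qualifier are the test's test_row_0/A:col10; the backoff values, attempt count and the assumption that the exception surfaces directly to the caller are mine):

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BusyRegionRetry {
  /** Writes one cell, backing off when the region rejects the put as too busy. */
  public static void putWithRetry(Connection connection, int maxAttempts) throws Exception {
    try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          // The region is over its memstore limit (512 KB in this test configuration)
          // until the in-flight flush completes; wait and try again.
          if (attempt == maxAttempts) {
            throw e;
          }
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}
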
2024-12-10T14:26:20,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:20,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:20,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742083_1259 (size=12001) 2024-12-10T14:26:20,396 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/48a36015801a448e86b56e63a1dff9f8 2024-12-10T14:26:20,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/c62ae1ce59704076b6808ae414f3bb0d as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/c62ae1ce59704076b6808ae414f3bb0d 2024-12-10T14:26:20,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/c62ae1ce59704076b6808ae414f3bb0d, entries=150, sequenceid=13, filesize=11.7 K 2024-12-10T14:26:20,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/7bad3117ece74a5a9fa57a3660620925 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7bad3117ece74a5a9fa57a3660620925 2024-12-10T14:26:20,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7bad3117ece74a5a9fa57a3660620925, entries=150, sequenceid=13, filesize=11.7 K 2024-12-10T14:26:20,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/48a36015801a448e86b56e63a1dff9f8 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/48a36015801a448e86b56e63a1dff9f8 2024-12-10T14:26:20,419 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/48a36015801a448e86b56e63a1dff9f8, entries=150, sequenceid=13, filesize=11.7 K 2024-12-10T14:26:20,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 718b511ced67f3365dbb07f1afd9efaa in 190ms, sequenceid=13, compaction requested=false 2024-12-10T14:26:20,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:20,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T14:26:20,527 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:20,527 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-10T14:26:20,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:20,527 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T14:26:20,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:20,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:20,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:20,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:20,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:20,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:20,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/982a5e17c5824628aac977c0f07432cc is 50, key is test_row_0/A:col10/1733840780256/Put/seqid=0 2024-12-10T14:26:20,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742084_1260 (size=12001) 2024-12-10T14:26:20,541 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/982a5e17c5824628aac977c0f07432cc 2024-12-10T14:26:20,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest 
cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/5022554ece15460ca83e4f489bef3c80 is 50, key is test_row_0/B:col10/1733840780256/Put/seqid=0 2024-12-10T14:26:20,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742085_1261 (size=12001) 2024-12-10T14:26:20,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:20,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:20,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57500 deadline: 1733840840575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840840577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840840577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840840578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840840581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840840682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840840682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840840682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840840683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T14:26:20,874 DEBUG [master/db1d50717577:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region d677ce41b1f947badc4a07f8de4e4b16 changed from -1.0 to 0.0, refreshing cache 2024-12-10T14:26:20,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57500 deadline: 1733840840880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840840885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840840885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840840885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:20,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840840887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:20,959 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/5022554ece15460ca83e4f489bef3c80 2024-12-10T14:26:20,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/5251b2a2b6ba4eecb615d0c1db7856db is 50, key is test_row_0/C:col10/1733840780256/Put/seqid=0 2024-12-10T14:26:20,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742086_1262 (size=12001) 2024-12-10T14:26:21,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:21,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840841188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:21,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:21,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840841188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:21,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:21,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840841189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:21,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:21,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840841189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:21,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T14:26:21,370 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/5251b2a2b6ba4eecb615d0c1db7856db 2024-12-10T14:26:21,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/982a5e17c5824628aac977c0f07432cc as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/982a5e17c5824628aac977c0f07432cc 2024-12-10T14:26:21,379 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/982a5e17c5824628aac977c0f07432cc, entries=150, sequenceid=37, filesize=11.7 K 2024-12-10T14:26:21,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/5022554ece15460ca83e4f489bef3c80 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/5022554ece15460ca83e4f489bef3c80 2024-12-10T14:26:21,384 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/5022554ece15460ca83e4f489bef3c80, entries=150, sequenceid=37, filesize=11.7 K 2024-12-10T14:26:21,384 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): 
Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:21,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/5251b2a2b6ba4eecb615d0c1db7856db as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/5251b2a2b6ba4eecb615d0c1db7856db 2024-12-10T14:26:21,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57500 deadline: 1733840841383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:21,388 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/5251b2a2b6ba4eecb615d0c1db7856db, entries=150, sequenceid=37, filesize=11.7 K 2024-12-10T14:26:21,389 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 718b511ced67f3365dbb07f1afd9efaa in 862ms, sequenceid=37, compaction requested=false 2024-12-10T14:26:21,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:21,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
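The "Over memstore limit=512.0 K" threshold that keeps rejecting puts above is the per-region blocking size, conventionally derived as the memstore flush size multiplied by the block multiplier; the flushes logged here (~60 KB and ~140 KB of data) bring the memstore back under it. The sketch below only illustrates how such a small limit could be configured for a test; the concrete values (128 KB × 4) are an assumption chosen to be consistent with the 512 K figure, not values taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Test-sized values chosen only to reproduce the 512 K figure seen in the log (assumption):
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);  // flush a region's memstore at ~128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block new writes at 4x the flush size
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    // The point at which puts start failing with RegionTooBusyException:
    System.out.println("Writes blocked above ~" + (blockingLimit / 1024) + " K per region");
  }
}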
2024-12-10T14:26:21,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-10T14:26:21,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-10T14:26:21,391 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-10T14:26:21,391 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1700 sec 2024-12-10T14:26:21,394 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.1750 sec 2024-12-10T14:26:21,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:21,691 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-10T14:26:21,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:21,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:21,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:21,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:21,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:21,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:21,696 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/ff03f5f0899a42a7bd5516cd3d946ba9 is 50, key is test_row_0/A:col10/1733840780574/Put/seqid=0 2024-12-10T14:26:21,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742087_1263 (size=12001) 2024-12-10T14:26:21,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:21,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:21,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840841710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:21,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840841710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:21,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:21,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840841713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:21,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:21,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840841713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:21,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:21,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840841814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:21,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:21,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840841814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:21,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:21,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840841816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:21,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:21,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840841816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:21,985 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T14:26:22,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840842017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840842017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840842018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840842018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,101 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/ff03f5f0899a42a7bd5516cd3d946ba9 2024-12-10T14:26:22,108 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/c6814bb8e4b04fcd975f2fc0fce0bd7e is 50, key is test_row_0/B:col10/1733840780574/Put/seqid=0 2024-12-10T14:26:22,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742088_1264 (size=12001) 2024-12-10T14:26:22,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840842320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840842320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840842320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T14:26:22,323 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-10T14:26:22,324 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:26:22,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840842322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-10T14:26:22,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-10T14:26:22,326 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:26:22,326 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:26:22,326 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:26:22,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57500 deadline: 1733840842392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-10T14:26:22,478 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:22,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-10T14:26:22,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:22,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:22,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:22,478 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:22,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:22,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:22,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/c6814bb8e4b04fcd975f2fc0fce0bd7e 2024-12-10T14:26:22,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/d4e8a76f6c2e4a36b0da3a82330a74ac is 50, key is test_row_0/C:col10/1733840780574/Put/seqid=0 2024-12-10T14:26:22,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742089_1265 (size=12001) 2024-12-10T14:26:22,524 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/d4e8a76f6c2e4a36b0da3a82330a74ac 2024-12-10T14:26:22,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/ff03f5f0899a42a7bd5516cd3d946ba9 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/ff03f5f0899a42a7bd5516cd3d946ba9 2024-12-10T14:26:22,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/ff03f5f0899a42a7bd5516cd3d946ba9, entries=150, sequenceid=51, filesize=11.7 K 2024-12-10T14:26:22,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/c6814bb8e4b04fcd975f2fc0fce0bd7e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c6814bb8e4b04fcd975f2fc0fce0bd7e 2024-12-10T14:26:22,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c6814bb8e4b04fcd975f2fc0fce0bd7e, entries=150, sequenceid=51, filesize=11.7 K 2024-12-10T14:26:22,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/d4e8a76f6c2e4a36b0da3a82330a74ac as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/d4e8a76f6c2e4a36b0da3a82330a74ac 
2024-12-10T14:26:22,544 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/d4e8a76f6c2e4a36b0da3a82330a74ac, entries=150, sequenceid=51, filesize=11.7 K 2024-12-10T14:26:22,545 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 718b511ced67f3365dbb07f1afd9efaa in 854ms, sequenceid=51, compaction requested=true 2024-12-10T14:26:22,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:22,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:26:22,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:22,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:26:22,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:22,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:26:22,545 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:22,545 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:22,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:22,547 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:22,547 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/B is initiating minor compaction (all files) 2024-12-10T14:26:22,547 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/B in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:22,547 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:22,547 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7bad3117ece74a5a9fa57a3660620925, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/5022554ece15460ca83e4f489bef3c80, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c6814bb8e4b04fcd975f2fc0fce0bd7e] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=35.2 K 2024-12-10T14:26:22,547 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/A is initiating minor compaction (all files) 2024-12-10T14:26:22,547 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/A in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:22,547 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/c62ae1ce59704076b6808ae414f3bb0d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/982a5e17c5824628aac977c0f07432cc, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/ff03f5f0899a42a7bd5516cd3d946ba9] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=35.2 K 2024-12-10T14:26:22,548 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bad3117ece74a5a9fa57a3660620925, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733840780227 2024-12-10T14:26:22,548 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting c62ae1ce59704076b6808ae414f3bb0d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733840780227 2024-12-10T14:26:22,548 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 5022554ece15460ca83e4f489bef3c80, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733840780255 2024-12-10T14:26:22,549 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 982a5e17c5824628aac977c0f07432cc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733840780255 2024-12-10T14:26:22,549 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting ff03f5f0899a42a7bd5516cd3d946ba9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733840780574 2024-12-10T14:26:22,549 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting c6814bb8e4b04fcd975f2fc0fce0bd7e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733840780574 2024-12-10T14:26:22,557 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#A#compaction#213 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:22,558 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/42f2e03d9f6048499fa8653dcb609501 is 50, key is test_row_0/A:col10/1733840780574/Put/seqid=0 2024-12-10T14:26:22,568 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#B#compaction#214 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:22,568 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/ba5b4ff4ec334caba92fd8d03fd4d0d3 is 50, key is test_row_0/B:col10/1733840780574/Put/seqid=0 2024-12-10T14:26:22,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742091_1267 (size=12104) 2024-12-10T14:26:22,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742090_1266 (size=12104) 2024-12-10T14:26:22,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-10T14:26:22,630 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:22,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-10T14:26:22,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:22,631 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T14:26:22,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:22,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:22,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:22,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:22,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:22,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:22,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/6d0a87d3d5564dbc81b3fbc555c6ce6f is 50, key is test_row_0/A:col10/1733840781697/Put/seqid=0 2024-12-10T14:26:22,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742092_1268 (size=12001) 2024-12-10T14:26:22,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:22,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:22,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840842863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840842866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840842867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840842868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-10T14:26:22,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840842968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840842972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840842972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:22,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840842972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:22,981 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/ba5b4ff4ec334caba92fd8d03fd4d0d3 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/ba5b4ff4ec334caba92fd8d03fd4d0d3 2024-12-10T14:26:22,986 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/42f2e03d9f6048499fa8653dcb609501 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/42f2e03d9f6048499fa8653dcb609501 2024-12-10T14:26:22,986 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/B of 718b511ced67f3365dbb07f1afd9efaa into ba5b4ff4ec334caba92fd8d03fd4d0d3(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:26:22,986 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa:
2024-12-10T14:26:22,986 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/B, priority=13, startTime=1733840782545; duration=0sec
2024-12-10T14:26:22,986 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-10T14:26:22,986 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:B
2024-12-10T14:26:22,987 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-10T14:26:22,988 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-10T14:26:22,988 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/C is initiating minor compaction (all files)
2024-12-10T14:26:22,988 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/C in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.
2024-12-10T14:26:22,988 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/48a36015801a448e86b56e63a1dff9f8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/5251b2a2b6ba4eecb615d0c1db7856db, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/d4e8a76f6c2e4a36b0da3a82330a74ac] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=35.2 K
2024-12-10T14:26:22,988 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 48a36015801a448e86b56e63a1dff9f8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733840780227
2024-12-10T14:26:22,989 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 5251b2a2b6ba4eecb615d0c1db7856db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733840780255
2024-12-10T14:26:22,989 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting d4e8a76f6c2e4a36b0da3a82330a74ac, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733840780574
2024-12-10T14:26:22,991 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/A of 718b511ced67f3365dbb07f1afd9efaa into 42f2e03d9f6048499fa8653dcb609501(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-10T14:26:22,991 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa:
2024-12-10T14:26:22,991 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/A, priority=13, startTime=1733840782545; duration=0sec
2024-12-10T14:26:22,991 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T14:26:22,991 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:A
2024-12-10T14:26:22,997 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#C#compaction#216 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-10T14:26:22,997 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/40d2944a5d514ce7b6754187c27f8976 is 50, key is test_row_0/C:col10/1733840780574/Put/seqid=0
2024-12-10T14:26:23,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742093_1269 (size=12104)
2024-12-10T14:26:23,058 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/6d0a87d3d5564dbc81b3fbc555c6ce6f
2024-12-10T14:26:23,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/fc508aacfe6340b4b5a6573255723746 is 50, key is test_row_0/B:col10/1733840781697/Put/seqid=0
2024-12-10T14:26:23,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742094_1270 (size=12001)
2024-12-10T14:26:23,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:23,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840843171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:23,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:23,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840843174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:23,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:23,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840843175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:23,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:23,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840843175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:23,408 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/40d2944a5d514ce7b6754187c27f8976 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/40d2944a5d514ce7b6754187c27f8976
2024-12-10T14:26:23,412 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/C of 718b511ced67f3365dbb07f1afd9efaa into 40d2944a5d514ce7b6754187c27f8976(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-10T14:26:23,412 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa:
2024-12-10T14:26:23,412 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/C, priority=13, startTime=1733840782545; duration=0sec
2024-12-10T14:26:23,412 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T14:26:23,412 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:C
2024-12-10T14:26:23,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73
2024-12-10T14:26:23,470 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/fc508aacfe6340b4b5a6573255723746
2024-12-10T14:26:23,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:23,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840843472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:23,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:23,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840843476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:23,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/debb634fa5a14da08b8099cf70c76cd3 is 50, key is test_row_0/C:col10/1733840781697/Put/seqid=0 2024-12-10T14:26:23,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:23,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840843479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:23,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:23,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840843480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:23,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742095_1271 (size=12001)
2024-12-10T14:26:23,883 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/debb634fa5a14da08b8099cf70c76cd3
2024-12-10T14:26:23,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/6d0a87d3d5564dbc81b3fbc555c6ce6f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/6d0a87d3d5564dbc81b3fbc555c6ce6f
2024-12-10T14:26:23,892 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/6d0a87d3d5564dbc81b3fbc555c6ce6f, entries=150, sequenceid=74, filesize=11.7 K
2024-12-10T14:26:23,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/fc508aacfe6340b4b5a6573255723746 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/fc508aacfe6340b4b5a6573255723746
2024-12-10T14:26:23,897 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/fc508aacfe6340b4b5a6573255723746, entries=150, sequenceid=74, filesize=11.7 K
2024-12-10T14:26:23,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/debb634fa5a14da08b8099cf70c76cd3 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/debb634fa5a14da08b8099cf70c76cd3
2024-12-10T14:26:23,902 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/debb634fa5a14da08b8099cf70c76cd3, entries=150, sequenceid=74, filesize=11.7 K
2024-12-10T14:26:23,902 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 718b511ced67f3365dbb07f1afd9efaa in 1271ms, sequenceid=74, compaction requested=false
2024-12-10T14:26:23,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 718b511ced67f3365dbb07f1afd9efaa:
2024-12-10T14:26:23,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.
2024-12-10T14:26:23,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74
2024-12-10T14:26:23,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=74
2024-12-10T14:26:23,905 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73
2024-12-10T14:26:23,905 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5770 sec
2024-12-10T14:26:23,906 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.5810 sec
2024-12-10T14:26:23,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa
2024-12-10T14:26:23,981 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB
2024-12-10T14:26:23,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A
2024-12-10T14:26:23,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T14:26:23,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B
2024-12-10T14:26:23,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T14:26:23,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C
2024-12-10T14:26:23,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T14:26:23,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/d15f0a4ddb2843d79c4f0b4a0744fb73 is 50, key is test_row_0/A:col10/1733840782866/Put/seqid=0
2024-12-10T14:26:23,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742096_1272 (size=14341)
2024-12-10T14:26:24,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840844000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840844000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840844001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840844001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840844104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840844105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840844105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840844105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840844308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840844308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840844309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840844309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,391 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/d15f0a4ddb2843d79c4f0b4a0744fb73 2024-12-10T14:26:24,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/0185750d33ff4a6b96007d21c955851e is 50, key is test_row_0/B:col10/1733840782866/Put/seqid=0 2024-12-10T14:26:24,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57500 deadline: 1733840844405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,408 DEBUG [Thread-1181 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4146 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., hostname=db1d50717577,46699,1733840717757, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T14:26:24,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742097_1273 (size=12001) 2024-12-10T14:26:24,414 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/0185750d33ff4a6b96007d21c955851e 2024-12-10T14:26:24,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/1d084e97d224439895ed17815621a3f1 is 50, key is test_row_0/C:col10/1733840782866/Put/seqid=0 2024-12-10T14:26:24,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742098_1274 (size=12001) 2024-12-10T14:26:24,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-10T14:26:24,430 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-10T14:26:24,431 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:26:24,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-10T14:26:24,433 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:26:24,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T14:26:24,433 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:26:24,434 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:26:24,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T14:26:24,585 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:24,586 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-10T14:26:24,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:24,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:24,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:24,586 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:24,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:24,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:24,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840844610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840844611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840844612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:24,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840844614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T14:26:24,738 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:24,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-10T14:26:24,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:24,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:24,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:24,739 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:24,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:24,827 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/1d084e97d224439895ed17815621a3f1 2024-12-10T14:26:24,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/d15f0a4ddb2843d79c4f0b4a0744fb73 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/d15f0a4ddb2843d79c4f0b4a0744fb73 2024-12-10T14:26:24,836 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/d15f0a4ddb2843d79c4f0b4a0744fb73, entries=200, sequenceid=92, filesize=14.0 K 2024-12-10T14:26:24,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/0185750d33ff4a6b96007d21c955851e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/0185750d33ff4a6b96007d21c955851e 2024-12-10T14:26:24,840 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/0185750d33ff4a6b96007d21c955851e, entries=150, sequenceid=92, filesize=11.7 K 2024-12-10T14:26:24,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/1d084e97d224439895ed17815621a3f1 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/1d084e97d224439895ed17815621a3f1 2024-12-10T14:26:24,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/1d084e97d224439895ed17815621a3f1, entries=150, sequenceid=92, filesize=11.7 K 2024-12-10T14:26:24,845 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 718b511ced67f3365dbb07f1afd9efaa in 864ms, sequenceid=92, compaction requested=true 2024-12-10T14:26:24,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:24,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:A, 
priority=-2147483648, current under compaction store size is 1 2024-12-10T14:26:24,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:24,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:26:24,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:24,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:26:24,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:24,845 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:24,845 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:24,846 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:24,846 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:24,846 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/A is initiating minor compaction (all files) 2024-12-10T14:26:24,846 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/B is initiating minor compaction (all files) 2024-12-10T14:26:24,846 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/B in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:24,846 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/A in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:24,846 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/ba5b4ff4ec334caba92fd8d03fd4d0d3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/fc508aacfe6340b4b5a6573255723746, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/0185750d33ff4a6b96007d21c955851e] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=35.3 K 2024-12-10T14:26:24,846 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/42f2e03d9f6048499fa8653dcb609501, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/6d0a87d3d5564dbc81b3fbc555c6ce6f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/d15f0a4ddb2843d79c4f0b4a0744fb73] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=37.5 K 2024-12-10T14:26:24,847 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42f2e03d9f6048499fa8653dcb609501, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733840780574 2024-12-10T14:26:24,847 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting ba5b4ff4ec334caba92fd8d03fd4d0d3, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733840780574 2024-12-10T14:26:24,847 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d0a87d3d5564dbc81b3fbc555c6ce6f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733840781697 2024-12-10T14:26:24,847 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting fc508aacfe6340b4b5a6573255723746, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733840781697 2024-12-10T14:26:24,848 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting d15f0a4ddb2843d79c4f0b4a0744fb73, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733840782861 2024-12-10T14:26:24,848 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 0185750d33ff4a6b96007d21c955851e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733840782866 2024-12-10T14:26:24,855 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#A#compaction#222 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:24,856 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#B#compaction#223 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:24,856 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/6cb19355989a4a33befdb3568e1e621c is 50, key is test_row_0/A:col10/1733840782866/Put/seqid=0 2024-12-10T14:26:24,856 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/3ded05b0b2d348f19790d6e8903f44fe is 50, key is test_row_0/B:col10/1733840782866/Put/seqid=0 2024-12-10T14:26:24,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742099_1275 (size=12207) 2024-12-10T14:26:24,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742100_1276 (size=12207) 2024-12-10T14:26:24,891 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:24,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-10T14:26:24,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:24,892 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-10T14:26:24,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:24,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:24,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:24,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:24,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:24,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:24,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/1407704c313546e48b3888aa4aee04fc is 50, key is test_row_0/A:col10/1733840783999/Put/seqid=0 2024-12-10T14:26:24,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742101_1277 (size=12001) 2024-12-10T14:26:25,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T14:26:25,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:25,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:25,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:25,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840845124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:25,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:25,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840845124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:25,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:25,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840845125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:25,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:25,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840845126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:25,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:25,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840845227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:25,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:25,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840845228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:25,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:25,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840845228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:25,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:25,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840845229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:25,268 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/3ded05b0b2d348f19790d6e8903f44fe as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/3ded05b0b2d348f19790d6e8903f44fe
2024-12-10T14:26:25,269 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/6cb19355989a4a33befdb3568e1e621c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/6cb19355989a4a33befdb3568e1e621c
2024-12-10T14:26:25,273 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/B of 718b511ced67f3365dbb07f1afd9efaa into 3ded05b0b2d348f19790d6e8903f44fe(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-10T14:26:25,273 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa:
2024-12-10T14:26:25,273 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/B, priority=13, startTime=1733840784845; duration=0sec
2024-12-10T14:26:25,273 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-10T14:26:25,273 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:B
2024-12-10T14:26:25,273 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-10T14:26:25,274 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/A of 718b511ced67f3365dbb07f1afd9efaa into 6cb19355989a4a33befdb3568e1e621c(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-10T14:26:25,274 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa:
2024-12-10T14:26:25,274 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/A, priority=13, startTime=1733840784845; duration=0sec
2024-12-10T14:26:25,274 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T14:26:25,274 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:A
2024-12-10T14:26:25,275 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-10T14:26:25,275 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/C is initiating minor compaction (all files)
2024-12-10T14:26:25,275 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/C in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.
2024-12-10T14:26:25,275 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/40d2944a5d514ce7b6754187c27f8976, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/debb634fa5a14da08b8099cf70c76cd3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/1d084e97d224439895ed17815621a3f1] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=35.3 K
2024-12-10T14:26:25,276 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 40d2944a5d514ce7b6754187c27f8976, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733840780574
2024-12-10T14:26:25,276 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting debb634fa5a14da08b8099cf70c76cd3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733840781697
2024-12-10T14:26:25,276 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d084e97d224439895ed17815621a3f1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733840782866
2024-12-10T14:26:25,283 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#C#compaction#225 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second
2024-12-10T14:26:25,284 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/4288ef74f57e4dfdbc69935c4ad374d5 is 50, key is test_row_0/C:col10/1733840782866/Put/seqid=0
2024-12-10T14:26:25,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742102_1278 (size=12207)
2024-12-10T14:26:25,300 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/1407704c313546e48b3888aa4aee04fc
2024-12-10T14:26:25,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/c0e705af4c9a434fb4bbd2a11540cc55 is 50, key is test_row_0/B:col10/1733840783999/Put/seqid=0
2024-12-10T14:26:25,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742103_1279 (size=12001)
2024-12-10T14:26:25,311 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/c0e705af4c9a434fb4bbd2a11540cc55
2024-12-10T14:26:25,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/b1bc690e22324df1ba9415a8df775a45 is 50, key is test_row_0/C:col10/1733840783999/Put/seqid=0
2024-12-10T14:26:25,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742104_1280 (size=12001)
2024-12-10T14:26:25,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:25,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840845431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:25,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:25,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840845431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:25,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:25,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840845431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:25,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:25,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840845432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:25,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75
2024-12-10T14:26:25,693 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/4288ef74f57e4dfdbc69935c4ad374d5 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/4288ef74f57e4dfdbc69935c4ad374d5
2024-12-10T14:26:25,698 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/C of 718b511ced67f3365dbb07f1afd9efaa into 4288ef74f57e4dfdbc69935c4ad374d5(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-10T14:26:25,698 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa:
2024-12-10T14:26:25,698 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/C, priority=13, startTime=1733840784845; duration=0sec
2024-12-10T14:26:25,698 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T14:26:25,698 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:C
2024-12-10T14:26:25,725 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=113 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/b1bc690e22324df1ba9415a8df775a45
2024-12-10T14:26:25,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/1407704c313546e48b3888aa4aee04fc as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/1407704c313546e48b3888aa4aee04fc
2024-12-10T14:26:25,733 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/1407704c313546e48b3888aa4aee04fc, entries=150, sequenceid=113, filesize=11.7 K
2024-12-10T14:26:25,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/c0e705af4c9a434fb4bbd2a11540cc55 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c0e705af4c9a434fb4bbd2a11540cc55
2024-12-10T14:26:25,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:25,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840845734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:25,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:25,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840845734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:25,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:25,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840845736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:25,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:25,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840845736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:25,739 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c0e705af4c9a434fb4bbd2a11540cc55, entries=150, sequenceid=113, filesize=11.7 K
2024-12-10T14:26:25,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/b1bc690e22324df1ba9415a8df775a45 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/b1bc690e22324df1ba9415a8df775a45
2024-12-10T14:26:25,743 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/b1bc690e22324df1ba9415a8df775a45, entries=150, sequenceid=113, filesize=11.7 K
2024-12-10T14:26:25,744 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 718b511ced67f3365dbb07f1afd9efaa in 853ms, sequenceid=113, compaction requested=false
2024-12-10T14:26:25,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 718b511ced67f3365dbb07f1afd9efaa:
2024-12-10T14:26:25,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.
2024-12-10T14:26:25,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76
2024-12-10T14:26:25,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=76
2024-12-10T14:26:25,747 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75
2024-12-10T14:26:25,747 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3110 sec
2024-12-10T14:26:25,748 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.3160 sec
2024-12-10T14:26:26,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa
2024-12-10T14:26:26,239 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB
2024-12-10T14:26:26,241 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A
2024-12-10T14:26:26,241 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T14:26:26,241 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B
2024-12-10T14:26:26,241 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T14:26:26,241 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C
2024-12-10T14:26:26,241 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T14:26:26,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/ba8e73c456f74c8088fd4b973733663e is 50, key is test_row_0/A:col10/1733840786239/Put/seqid=0
2024-12-10T14:26:26,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742105_1281 (size=12101)
2024-12-10T14:26:26,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:26,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840846254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:26,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:26,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840846256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:26,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:26,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840846256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:26,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:26,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840846257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:26,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:26,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840846357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:26,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:26,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840846359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:26,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:26:26,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840846359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
2024-12-10T14:26:26,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:26,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840846360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:26,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T14:26:26,537 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-10T14:26:26,538 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:26:26,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-10T14:26:26,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T14:26:26,539 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:26:26,540 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:26:26,540 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:26:26,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:26,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840846560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:26,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:26,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840846562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:26,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:26,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840846562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:26,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:26,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840846562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:26,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T14:26:26,650 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/ba8e73c456f74c8088fd4b973733663e 2024-12-10T14:26:26,657 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/87583b8132f8491090c5e47037ad0cb9 is 50, key is test_row_0/B:col10/1733840786239/Put/seqid=0 2024-12-10T14:26:26,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742106_1282 (size=12101) 2024-12-10T14:26:26,691 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:26,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T14:26:26,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:26,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:26,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:26,692 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:26,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:26,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:26,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T14:26:26,844 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:26,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T14:26:26,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:26,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:26,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:26,845 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:26,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:26,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:26,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:26,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:26,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840846865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:26,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840846864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:26,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:26,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840846865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:26,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:26,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840846867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:26,997 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:26,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T14:26:26,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:26,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:26,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:26,997 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:26,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:26,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:27,062 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/87583b8132f8491090c5e47037ad0cb9 2024-12-10T14:26:27,069 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/b23cf149484e47e3af1fc78c9174e686 is 50, key is test_row_0/C:col10/1733840786239/Put/seqid=0 2024-12-10T14:26:27,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742107_1283 (size=12101) 2024-12-10T14:26:27,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T14:26:27,149 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:27,150 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T14:26:27,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:27,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:27,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:27,150 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:27,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:27,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:27,302 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:27,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T14:26:27,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:27,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:27,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:27,303 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:27,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:27,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:27,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:27,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840847368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:27,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:27,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840847370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:27,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:27,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840847370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:27,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:27,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840847371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:27,455 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:27,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T14:26:27,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:27,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:27,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:27,455 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
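The ERROR above shows the remote flush procedure (pid=78) failing with "Unable to complete flush" simply because the region's memstore is already being flushed; the dispatcher re-sends the same callable shortly afterwards (pid=78 reappears below) and the flush then proceeds. A minimal sketch, assuming an already-open Connection named conn and a caller for whom retrying on IOException is acceptable, of requesting such a flush through the HBase 2.x Admin API:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class FlushRetry {
    // Ask the region servers to flush the table, retrying because the request can
    // fail while a memstore flush is already in progress (the "Unable to complete
    // flush ... as already flushing" case in the log above).
    public static void flushWithRetry(Connection conn, String table)
            throws IOException, InterruptedException {
        TableName name = TableName.valueOf(table);           // e.g. "TestAcidGuarantees" (assumption)
        try (Admin admin = conn.getAdmin()) {
            IOException last = null;
            for (int attempt = 1; attempt <= 5; attempt++) { // retry budget is an arbitrary choice
                try {
                    admin.flush(name);                        // synchronous flush request
                    return;
                } catch (IOException e) {
                    last = e;
                    Thread.sleep(1000L * attempt);            // simple linear backoff
                }
            }
            throw last;
        }
    }
}
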
2024-12-10T14:26:27,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:27,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:27,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/b23cf149484e47e3af1fc78c9174e686 2024-12-10T14:26:27,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/ba8e73c456f74c8088fd4b973733663e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/ba8e73c456f74c8088fd4b973733663e 2024-12-10T14:26:27,482 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/ba8e73c456f74c8088fd4b973733663e, entries=150, sequenceid=133, filesize=11.8 K 2024-12-10T14:26:27,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/87583b8132f8491090c5e47037ad0cb9 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/87583b8132f8491090c5e47037ad0cb9 2024-12-10T14:26:27,487 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/87583b8132f8491090c5e47037ad0cb9, entries=150, sequenceid=133, filesize=11.8 K 2024-12-10T14:26:27,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/b23cf149484e47e3af1fc78c9174e686 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/b23cf149484e47e3af1fc78c9174e686 2024-12-10T14:26:27,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/b23cf149484e47e3af1fc78c9174e686, entries=150, sequenceid=133, filesize=11.8 K 2024-12-10T14:26:27,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 718b511ced67f3365dbb07f1afd9efaa in 1253ms, sequenceid=133, compaction requested=true 2024-12-10T14:26:27,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:27,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
718b511ced67f3365dbb07f1afd9efaa:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:26:27,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:27,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:26:27,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:27,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:26:27,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:27,492 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:27,492 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:27,494 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:27,494 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/B is initiating minor compaction (all files) 2024-12-10T14:26:27,494 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/B in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
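The repeated "Over memstore limit=512.0 K" rejections above come from HRegion.checkResources, which blocks writes with RegionTooBusyException once a region's memstore exceeds its blocking size, i.e. the configured flush size multiplied by the block multiplier; writers are pushed back until the flush (and the compactions now being queued) catch up. A minimal sketch of the two settings involved; the concrete 128 KB / 4x split is an assumption chosen only so the product matches the 512 K limit seen in this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class MemstoreLimitConfig {
    public static Configuration smallMemstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB ...
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // ... and block new writes (RegionTooBusyException) at 4x that size = 512 KB,
        // matching the "Over memstore limit=512.0 K" messages in this log.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}
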
2024-12-10T14:26:27,494 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/3ded05b0b2d348f19790d6e8903f44fe, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c0e705af4c9a434fb4bbd2a11540cc55, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/87583b8132f8491090c5e47037ad0cb9] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=35.5 K 2024-12-10T14:26:27,494 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:27,494 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/A is initiating minor compaction (all files) 2024-12-10T14:26:27,494 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/A in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:27,494 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/6cb19355989a4a33befdb3568e1e621c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/1407704c313546e48b3888aa4aee04fc, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/ba8e73c456f74c8088fd4b973733663e] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=35.5 K 2024-12-10T14:26:27,496 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6cb19355989a4a33befdb3568e1e621c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733840782866 2024-12-10T14:26:27,496 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ded05b0b2d348f19790d6e8903f44fe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733840782866 2024-12-10T14:26:27,496 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1407704c313546e48b3888aa4aee04fc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1733840783996 2024-12-10T14:26:27,496 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting c0e705af4c9a434fb4bbd2a11540cc55, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1733840783996 2024-12-10T14:26:27,497 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting ba8e73c456f74c8088fd4b973733663e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733840785118 2024-12-10T14:26:27,497 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 87583b8132f8491090c5e47037ad0cb9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733840785118 2024-12-10T14:26:27,504 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#A#compaction#231 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:27,505 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#B#compaction#232 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:27,505 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/e5b0cf0818fa4934a912d04c4ab16720 is 50, key is test_row_0/A:col10/1733840786239/Put/seqid=0 2024-12-10T14:26:27,505 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/7d79937d2dbd447c9318008c2f7fc36a is 50, key is test_row_0/B:col10/1733840786239/Put/seqid=0 2024-12-10T14:26:27,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742109_1285 (size=12409) 2024-12-10T14:26:27,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742108_1284 (size=12409) 2024-12-10T14:26:27,515 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/e5b0cf0818fa4934a912d04c4ab16720 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/e5b0cf0818fa4934a912d04c4ab16720 2024-12-10T14:26:27,521 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/A of 718b511ced67f3365dbb07f1afd9efaa into e5b0cf0818fa4934a912d04c4ab16720(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
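At this point the short-compactions thread has rewritten the three A-family HFiles (totalSize=35.5 K) into a single 12.1 K file. The same kind of compaction can also be requested and observed from a client; a minimal sketch, assuming an open Connection and the HBase 2.x Admin API, not the mechanism this test itself uses:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;

public final class CompactionProbe {
    // Request a compaction of every region of the table and poll until the
    // region servers report that no compaction is running any more.
    public static void compactAndWait(Connection conn, String table)
            throws IOException, InterruptedException {
        TableName name = TableName.valueOf(table);    // e.g. "TestAcidGuarantees" (assumption)
        try (Admin admin = conn.getAdmin()) {
            admin.compact(name);                      // asynchronous: may return before compaction finishes
            while (admin.getCompactionState(name) != CompactionState.NONE) {
                Thread.sleep(500);                    // poll interval is arbitrary
            }
        }
    }
}
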
2024-12-10T14:26:27,521 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:27,521 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/A, priority=13, startTime=1733840787492; duration=0sec 2024-12-10T14:26:27,521 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:27,521 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:A 2024-12-10T14:26:27,521 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:27,522 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:27,523 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/C is initiating minor compaction (all files) 2024-12-10T14:26:27,523 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/C in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:27,523 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/4288ef74f57e4dfdbc69935c4ad374d5, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/b1bc690e22324df1ba9415a8df775a45, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/b23cf149484e47e3af1fc78c9174e686] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=35.5 K 2024-12-10T14:26:27,523 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4288ef74f57e4dfdbc69935c4ad374d5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733840782866 2024-12-10T14:26:27,524 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1bc690e22324df1ba9415a8df775a45, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=113, earliestPutTs=1733840783996 2024-12-10T14:26:27,524 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b23cf149484e47e3af1fc78c9174e686, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733840785118 2024-12-10T14:26:27,525 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the 
MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-10T14:26:27,525 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-10T14:26:27,534 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#C#compaction#233 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:27,534 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/d334f1d26c1a4016a4c38932bf0cef17 is 50, key is test_row_0/C:col10/1733840786239/Put/seqid=0 2024-12-10T14:26:27,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742110_1286 (size=12409) 2024-12-10T14:26:27,547 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/d334f1d26c1a4016a4c38932bf0cef17 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/d334f1d26c1a4016a4c38932bf0cef17 2024-12-10T14:26:27,553 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/C of 718b511ced67f3365dbb07f1afd9efaa into d334f1d26c1a4016a4c38932bf0cef17(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
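All three column families (A, B, C) end up compacted to a single file each, yet the writers are still rejected with RegionTooBusyException, and, as the RpcRetryingCallerImpl entry further down shows (tries=7, retries=16), the client keeps retrying each put with backoff instead of failing immediately. A minimal sketch of how a writer might bound that behaviour; the retry count, pause, and cell value are illustrative values, not the ones used by this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BoundedWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 5); // fewer retries than the default
        conf.setLong("hbase.client.pause", 250L);      // base pause (ms) between retries
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row, family and qualifier mirror the cells seen in this log (test_row_0/A:col10).
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // Each RegionTooBusyException is retried internally up to the budget above;
            // once it is exhausted the put surfaces as an IOException to the caller.
            table.put(put);
        }
    }
}
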
2024-12-10T14:26:27,553 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:27,553 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/C, priority=13, startTime=1733840787492; duration=0sec 2024-12-10T14:26:27,553 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:27,553 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:C 2024-12-10T14:26:27,608 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:27,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T14:26:27,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:27,609 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-10T14:26:27,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:27,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:27,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:27,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:27,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:27,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:27,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/75176d7e277a4b4aac1db9afe3b72739 is 50, key is test_row_0/A:col10/1733840786255/Put/seqid=0 2024-12-10T14:26:27,624 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742111_1287 (size=12151) 2024-12-10T14:26:27,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T14:26:27,916 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/7d79937d2dbd447c9318008c2f7fc36a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7d79937d2dbd447c9318008c2f7fc36a 2024-12-10T14:26:27,932 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/B of 718b511ced67f3365dbb07f1afd9efaa into 7d79937d2dbd447c9318008c2f7fc36a(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:26:27,932 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:27,932 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/B, priority=13, startTime=1733840787492; duration=0sec 2024-12-10T14:26:27,932 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:27,932 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:B 2024-12-10T14:26:28,025 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/75176d7e277a4b4aac1db9afe3b72739 2024-12-10T14:26:28,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/1f30ee75f7a04136924b3e9e0dc514a8 is 50, key is test_row_0/B:col10/1733840786255/Put/seqid=0 2024-12-10T14:26:28,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742112_1288 (size=12151) 2024-12-10T14:26:28,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:28,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
as already flushing 2024-12-10T14:26:28,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:28,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840848388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:28,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:28,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840848389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:28,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:28,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840848390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:28,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:28,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840848390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:28,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:28,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57500 deadline: 1733840848429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:28,430 DEBUG [Thread-1181 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8169 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., hostname=db1d50717577,46699,1733840717757, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T14:26:28,437 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/1f30ee75f7a04136924b3e9e0dc514a8 2024-12-10T14:26:28,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/7470ddb473b34e8f885382be87d14307 is 50, key is test_row_0/C:col10/1733840786255/Put/seqid=0 2024-12-10T14:26:28,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742113_1289 (size=12151) 2024-12-10T14:26:28,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:28,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840848492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:28,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:28,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840848492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:28,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:28,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840848493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:28,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:28,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840848493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:28,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T14:26:28,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:28,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840848695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:28,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:28,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840848696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:28,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:28,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840848696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:28,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:28,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840848697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:28,857 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/7470ddb473b34e8f885382be87d14307 2024-12-10T14:26:28,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/75176d7e277a4b4aac1db9afe3b72739 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/75176d7e277a4b4aac1db9afe3b72739 2024-12-10T14:26:28,867 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/75176d7e277a4b4aac1db9afe3b72739, entries=150, sequenceid=155, filesize=11.9 K 2024-12-10T14:26:28,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/1f30ee75f7a04136924b3e9e0dc514a8 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/1f30ee75f7a04136924b3e9e0dc514a8 2024-12-10T14:26:28,871 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/1f30ee75f7a04136924b3e9e0dc514a8, entries=150, sequenceid=155, filesize=11.9 K 2024-12-10T14:26:28,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/7470ddb473b34e8f885382be87d14307 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/7470ddb473b34e8f885382be87d14307 2024-12-10T14:26:28,876 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/7470ddb473b34e8f885382be87d14307, entries=150, sequenceid=155, filesize=11.9 K 2024-12-10T14:26:28,877 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 718b511ced67f3365dbb07f1afd9efaa in 1269ms, sequenceid=155, compaction requested=false 2024-12-10T14:26:28,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:28,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:28,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-10T14:26:28,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-10T14:26:28,879 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-10T14:26:28,879 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3380 sec 2024-12-10T14:26:28,881 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 2.3420 sec 2024-12-10T14:26:29,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:29,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-10T14:26:29,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:29,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:29,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:29,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:29,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:29,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:29,006 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/b682f715433a44cdb4594b6d9a83c750 is 50, key is test_row_0/A:col10/1733840788389/Put/seqid=0 2024-12-10T14:26:29,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742114_1290 (size=14541) 2024-12-10T14:26:29,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:29,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840849013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:29,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:29,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840849014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:29,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:29,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840849016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:29,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:29,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840849016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:29,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:29,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840849117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:29,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:29,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840849118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:29,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:29,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840849120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:29,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:29,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840849120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:29,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:29,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840849320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:29,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:29,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840849321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:29,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:29,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840849322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:29,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:29,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840849323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:29,414 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/b682f715433a44cdb4594b6d9a83c750 2024-12-10T14:26:29,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/db78053980bf4d76b48a71e8335e7450 is 50, key is test_row_0/B:col10/1733840788389/Put/seqid=0 2024-12-10T14:26:29,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742115_1291 (size=12151) 2024-12-10T14:26:29,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:29,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840849622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:29,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:29,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840849623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:29,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:29,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840849625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:29,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:29,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840849627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:29,827 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/db78053980bf4d76b48a71e8335e7450 2024-12-10T14:26:29,836 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/4277cdec89414c3bac0d274b8159d593 is 50, key is test_row_0/C:col10/1733840788389/Put/seqid=0 2024-12-10T14:26:29,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742116_1292 (size=12151) 2024-12-10T14:26:30,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:30,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840850126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:30,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:30,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840850128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:30,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:30,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840850131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:30,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:30,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840850132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:30,241 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/4277cdec89414c3bac0d274b8159d593 2024-12-10T14:26:30,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/b682f715433a44cdb4594b6d9a83c750 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/b682f715433a44cdb4594b6d9a83c750 2024-12-10T14:26:30,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/b682f715433a44cdb4594b6d9a83c750, entries=200, sequenceid=174, filesize=14.2 K 2024-12-10T14:26:30,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/db78053980bf4d76b48a71e8335e7450 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/db78053980bf4d76b48a71e8335e7450 2024-12-10T14:26:30,253 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/db78053980bf4d76b48a71e8335e7450, entries=150, sequenceid=174, filesize=11.9 K 2024-12-10T14:26:30,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/4277cdec89414c3bac0d274b8159d593 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/4277cdec89414c3bac0d274b8159d593 2024-12-10T14:26:30,257 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/4277cdec89414c3bac0d274b8159d593, entries=150, sequenceid=174, filesize=11.9 K
2024-12-10T14:26:30,258 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 718b511ced67f3365dbb07f1afd9efaa in 1258ms, sequenceid=174, compaction requested=true
2024-12-10T14:26:30,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 718b511ced67f3365dbb07f1afd9efaa:
2024-12-10T14:26:30,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:A, priority=-2147483648, current under compaction store size is 1
2024-12-10T14:26:30,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T14:26:30,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:B, priority=-2147483648, current under compaction store size is 2
2024-12-10T14:26:30,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T14:26:30,258 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-10T14:26:30,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:C, priority=-2147483648, current under compaction store size is 3
2024-12-10T14:26:30,258 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-10T14:26:30,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-10T14:26:30,268 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39101 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-10T14:26:30,268 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-10T14:26:30,269 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/A is initiating minor compaction (all files)
2024-12-10T14:26:30,269 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/B is initiating minor compaction (all files)
2024-12-10T14:26:30,269 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/A in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.
2024-12-10T14:26:30,269 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/B in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:30,269 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/e5b0cf0818fa4934a912d04c4ab16720, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/75176d7e277a4b4aac1db9afe3b72739, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/b682f715433a44cdb4594b6d9a83c750] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=38.2 K 2024-12-10T14:26:30,269 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7d79937d2dbd447c9318008c2f7fc36a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/1f30ee75f7a04136924b3e9e0dc514a8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/db78053980bf4d76b48a71e8335e7450] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=35.9 K 2024-12-10T14:26:30,269 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d79937d2dbd447c9318008c2f7fc36a, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733840785118 2024-12-10T14:26:30,269 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5b0cf0818fa4934a912d04c4ab16720, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733840785118 2024-12-10T14:26:30,269 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f30ee75f7a04136924b3e9e0dc514a8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733840786253 2024-12-10T14:26:30,270 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75176d7e277a4b4aac1db9afe3b72739, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733840786253 2024-12-10T14:26:30,271 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b682f715433a44cdb4594b6d9a83c750, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733840788388 2024-12-10T14:26:30,271 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting db78053980bf4d76b48a71e8335e7450, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733840788388 
2024-12-10T14:26:30,279 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#B#compaction#241 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:30,279 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#A#compaction#240 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:30,280 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/1bcc9ca5d9ad408e979262e7233898a3 is 50, key is test_row_0/A:col10/1733840788389/Put/seqid=0 2024-12-10T14:26:30,280 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/c766e4f50f7c4a4c97392984ce8e49cf is 50, key is test_row_0/B:col10/1733840788389/Put/seqid=0 2024-12-10T14:26:30,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742117_1293 (size=12561) 2024-12-10T14:26:30,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742118_1294 (size=12561) 2024-12-10T14:26:30,291 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/1bcc9ca5d9ad408e979262e7233898a3 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/1bcc9ca5d9ad408e979262e7233898a3 2024-12-10T14:26:30,293 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/c766e4f50f7c4a4c97392984ce8e49cf as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c766e4f50f7c4a4c97392984ce8e49cf 2024-12-10T14:26:30,297 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/A of 718b511ced67f3365dbb07f1afd9efaa into 1bcc9ca5d9ad408e979262e7233898a3(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:26:30,297 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa:
2024-12-10T14:26:30,297 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/A, priority=13, startTime=1733840790258; duration=0sec
2024-12-10T14:26:30,297 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-10T14:26:30,297 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:A
2024-12-10T14:26:30,297 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-10T14:26:30,298 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/B of 718b511ced67f3365dbb07f1afd9efaa into c766e4f50f7c4a4c97392984ce8e49cf(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-10T14:26:30,298 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa:
2024-12-10T14:26:30,298 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/B, priority=13, startTime=1733840790258; duration=0sec
2024-12-10T14:26:30,298 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T14:26:30,298 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:B
2024-12-10T14:26:30,299 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-10T14:26:30,299 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/C is initiating minor compaction (all files)
2024-12-10T14:26:30,299 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/C in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.
2024-12-10T14:26:30,299 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/d334f1d26c1a4016a4c38932bf0cef17, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/7470ddb473b34e8f885382be87d14307, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/4277cdec89414c3bac0d274b8159d593] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=35.9 K 2024-12-10T14:26:30,299 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting d334f1d26c1a4016a4c38932bf0cef17, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733840785118 2024-12-10T14:26:30,300 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7470ddb473b34e8f885382be87d14307, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733840786253 2024-12-10T14:26:30,300 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4277cdec89414c3bac0d274b8159d593, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733840788388 2024-12-10T14:26:30,311 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#C#compaction#242 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second
2024-12-10T14:26:30,312 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/c642479d73224934b5156c05480845f4 is 50, key is test_row_0/C:col10/1733840788389/Put/seqid=0
2024-12-10T14:26:30,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742119_1295 (size=12561)
2024-12-10T14:26:30,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77
2024-12-10T14:26:30,644 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed
2024-12-10T14:26:30,645 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-10T14:26:30,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees
2024-12-10T14:26:30,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79
2024-12-10T14:26:30,646 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-10T14:26:30,647 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-10T14:26:30,647 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-10T14:26:30,736 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/c642479d73224934b5156c05480845f4 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/c642479d73224934b5156c05480845f4
2024-12-10T14:26:30,742 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/C of 718b511ced67f3365dbb07f1afd9efaa into c642479d73224934b5156c05480845f4(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-10T14:26:30,742 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:30,742 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/C, priority=13, startTime=1733840790258; duration=0sec 2024-12-10T14:26:30,742 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:30,742 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:C 2024-12-10T14:26:30,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-10T14:26:30,798 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:30,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-10T14:26:30,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:30,799 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-10T14:26:30,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:30,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:30,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:30,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:30,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:30,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:30,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/234c69408d9a4407a83ed6b720707d9e is 50, key is test_row_0/A:col10/1733840789014/Put/seqid=0 2024-12-10T14:26:30,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742120_1296 (size=12151) 2024-12-10T14:26:30,810 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/234c69408d9a4407a83ed6b720707d9e 2024-12-10T14:26:30,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/a55bf3b7571e4c5e9915cb308779e68e is 50, key is test_row_0/B:col10/1733840789014/Put/seqid=0 2024-12-10T14:26:30,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742121_1297 (size=12151) 2024-12-10T14:26:30,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-10T14:26:31,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:31,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:31,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,174 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840851172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840851172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840851174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840851175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,228 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/a55bf3b7571e4c5e9915cb308779e68e 2024-12-10T14:26:31,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/734790bc1b524994ac2d437f75d77832 is 50, key is test_row_0/C:col10/1733840789014/Put/seqid=0 2024-12-10T14:26:31,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742122_1298 (size=12151) 2024-12-10T14:26:31,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-10T14:26:31,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840851276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840851276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840851278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840851279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840851477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840851477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840851481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840851482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,640 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/734790bc1b524994ac2d437f75d77832 2024-12-10T14:26:31,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/234c69408d9a4407a83ed6b720707d9e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/234c69408d9a4407a83ed6b720707d9e 2024-12-10T14:26:31,648 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/234c69408d9a4407a83ed6b720707d9e, entries=150, sequenceid=196, filesize=11.9 K 2024-12-10T14:26:31,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/a55bf3b7571e4c5e9915cb308779e68e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/a55bf3b7571e4c5e9915cb308779e68e 2024-12-10T14:26:31,652 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/a55bf3b7571e4c5e9915cb308779e68e, entries=150, sequenceid=196, filesize=11.9 K 2024-12-10T14:26:31,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/734790bc1b524994ac2d437f75d77832 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/734790bc1b524994ac2d437f75d77832 2024-12-10T14:26:31,657 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/734790bc1b524994ac2d437f75d77832, entries=150, sequenceid=196, filesize=11.9 K 2024-12-10T14:26:31,657 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 718b511ced67f3365dbb07f1afd9efaa in 858ms, sequenceid=196, compaction requested=false 2024-12-10T14:26:31,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:31,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:31,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80
2024-12-10T14:26:31,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=80
2024-12-10T14:26:31,660 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79
2024-12-10T14:26:31,660 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0120 sec
2024-12-10T14:26:31,661 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.0150 sec
2024-12-10T14:26:31,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79
2024-12-10T14:26:31,749 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed
2024-12-10T14:26:31,750 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-10T14:26:31,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees
2024-12-10T14:26:31,752 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-10T14:26:31,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81
2024-12-10T14:26:31,753 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-10T14:26:31,753 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-10T14:26:31,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa
2024-12-10T14:26:31,783 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB
2024-12-10T14:26:31,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A
2024-12-10T14:26:31,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T14:26:31,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B
2024-12-10T14:26:31,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T14:26:31,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:31,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:31,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/5dfe1fd986f348e39b6e8bd231b54bfe is 50, key is test_row_0/A:col10/1733840791173/Put/seqid=0 2024-12-10T14:26:31,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742123_1299 (size=16931) 2024-12-10T14:26:31,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840851797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840851800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840851800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840851800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T14:26:31,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840851901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840851903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,905 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:31,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840851903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:31,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840851903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:31,905 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T14:26:31,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:31,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:31,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:31,906 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:31,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:31,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T14:26:32,058 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:32,058 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T14:26:32,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:32,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:32,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:32,058 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:32,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840852103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:32,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:32,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840852106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:32,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:32,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840852107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:32,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:32,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840852107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:32,192 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/5dfe1fd986f348e39b6e8bd231b54bfe 2024-12-10T14:26:32,199 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/7d40915d75c94e21aec556939d1f6bf4 is 50, key is test_row_0/B:col10/1733840791173/Put/seqid=0 2024-12-10T14:26:32,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742124_1300 (size=12151) 2024-12-10T14:26:32,210 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:32,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T14:26:32,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:32,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:32,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:32,211 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T14:26:32,363 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:32,363 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T14:26:32,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:32,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:32,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:32,364 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:32,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840852406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:32,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:32,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840852409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:32,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:32,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840852409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:32,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:32,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840852411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:32,516 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:32,516 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T14:26:32,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:32,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:32,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:32,517 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:32,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/7d40915d75c94e21aec556939d1f6bf4 2024-12-10T14:26:32,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/583137b91005417c9667c7f40c3194d5 is 50, key is test_row_0/C:col10/1733840791173/Put/seqid=0 2024-12-10T14:26:32,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742125_1301 (size=12151) 2024-12-10T14:26:32,668 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:32,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T14:26:32,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:32,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:32,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:32,669 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:32,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,821 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:32,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T14:26:32,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:32,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:32,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:32,822 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T14:26:32,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:32,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840852912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:32,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:32,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840852913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:32,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:32,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840852914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:32,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:32,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840852916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:32,974 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:32,974 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T14:26:32,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:32,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:32,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:32,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:32,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:32,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:33,019 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/583137b91005417c9667c7f40c3194d5 2024-12-10T14:26:33,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/5dfe1fd986f348e39b6e8bd231b54bfe as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/5dfe1fd986f348e39b6e8bd231b54bfe 2024-12-10T14:26:33,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/5dfe1fd986f348e39b6e8bd231b54bfe, entries=250, sequenceid=215, filesize=16.5 K 2024-12-10T14:26:33,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/7d40915d75c94e21aec556939d1f6bf4 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7d40915d75c94e21aec556939d1f6bf4 2024-12-10T14:26:33,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7d40915d75c94e21aec556939d1f6bf4, entries=150, sequenceid=215, filesize=11.9 K 2024-12-10T14:26:33,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/583137b91005417c9667c7f40c3194d5 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/583137b91005417c9667c7f40c3194d5 2024-12-10T14:26:33,042 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/583137b91005417c9667c7f40c3194d5, entries=150, sequenceid=215, filesize=11.9 K 2024-12-10T14:26:33,042 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 718b511ced67f3365dbb07f1afd9efaa in 1259ms, sequenceid=215, compaction requested=true 2024-12-10T14:26:33,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:33,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
718b511ced67f3365dbb07f1afd9efaa:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:26:33,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:33,043 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:33,043 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:33,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:26:33,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:33,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:26:33,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:33,044 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:33,044 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/B is initiating minor compaction (all files) 2024-12-10T14:26:33,044 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/B in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:33,044 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c766e4f50f7c4a4c97392984ce8e49cf, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/a55bf3b7571e4c5e9915cb308779e68e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7d40915d75c94e21aec556939d1f6bf4] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=36.0 K 2024-12-10T14:26:33,044 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:33,045 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/A is initiating minor compaction (all files) 2024-12-10T14:26:33,045 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/A in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:33,045 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/1bcc9ca5d9ad408e979262e7233898a3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/234c69408d9a4407a83ed6b720707d9e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/5dfe1fd986f348e39b6e8bd231b54bfe] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=40.7 K 2024-12-10T14:26:33,045 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting c766e4f50f7c4a4c97392984ce8e49cf, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733840788388 2024-12-10T14:26:33,045 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bcc9ca5d9ad408e979262e7233898a3, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733840788388 2024-12-10T14:26:33,045 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting a55bf3b7571e4c5e9915cb308779e68e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733840789010 2024-12-10T14:26:33,046 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d40915d75c94e21aec556939d1f6bf4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733840791173 2024-12-10T14:26:33,046 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 234c69408d9a4407a83ed6b720707d9e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733840789010 2024-12-10T14:26:33,046 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5dfe1fd986f348e39b6e8bd231b54bfe, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733840791170 2024-12-10T14:26:33,062 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#B#compaction#249 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:33,062 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/8eba74751fbd4895bc4fe5d6efac4efe is 50, key is test_row_0/B:col10/1733840791173/Put/seqid=0 2024-12-10T14:26:33,063 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#A#compaction#250 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:33,063 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/7c9eafefec8846c2b209dd39a29ee0be is 50, key is test_row_0/A:col10/1733840791173/Put/seqid=0 2024-12-10T14:26:33,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742126_1302 (size=12663) 2024-12-10T14:26:33,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742127_1303 (size=12663) 2024-12-10T14:26:33,088 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/8eba74751fbd4895bc4fe5d6efac4efe as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/8eba74751fbd4895bc4fe5d6efac4efe 2024-12-10T14:26:33,090 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/7c9eafefec8846c2b209dd39a29ee0be as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/7c9eafefec8846c2b209dd39a29ee0be 2024-12-10T14:26:33,095 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/B of 718b511ced67f3365dbb07f1afd9efaa into 8eba74751fbd4895bc4fe5d6efac4efe(size=12.4 K), total size for store is 12.4 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:26:33,095 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:33,095 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/B, priority=13, startTime=1733840793043; duration=0sec 2024-12-10T14:26:33,095 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:33,095 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:B 2024-12-10T14:26:33,095 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:33,096 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/A of 718b511ced67f3365dbb07f1afd9efaa into 7c9eafefec8846c2b209dd39a29ee0be(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:26:33,097 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:33,097 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/A, priority=13, startTime=1733840793043; duration=0sec 2024-12-10T14:26:33,097 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:33,097 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:A 2024-12-10T14:26:33,098 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:33,098 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/C is initiating minor compaction (all files) 2024-12-10T14:26:33,098 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/C in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:33,098 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/c642479d73224934b5156c05480845f4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/734790bc1b524994ac2d437f75d77832, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/583137b91005417c9667c7f40c3194d5] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=36.0 K 2024-12-10T14:26:33,098 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting c642479d73224934b5156c05480845f4, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733840788388 2024-12-10T14:26:33,098 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 734790bc1b524994ac2d437f75d77832, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733840789010 2024-12-10T14:26:33,099 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 583137b91005417c9667c7f40c3194d5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733840791173 2024-12-10T14:26:33,105 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#C#compaction#251 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:33,106 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/9d1a802849b3438fb7a44f6576ecb208 is 50, key is test_row_0/C:col10/1733840791173/Put/seqid=0 2024-12-10T14:26:33,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742128_1304 (size=12663) 2024-12-10T14:26:33,127 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:33,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T14:26:33,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:33,127 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-10T14:26:33,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:33,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:33,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:33,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:33,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:33,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:33,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/46edc050932c48f2b50215426472573b is 50, key is test_row_0/A:col10/1733840791799/Put/seqid=0 2024-12-10T14:26:33,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742129_1305 (size=12151) 2024-12-10T14:26:33,515 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/9d1a802849b3438fb7a44f6576ecb208 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/9d1a802849b3438fb7a44f6576ecb208 2024-12-10T14:26:33,519 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/C of 718b511ced67f3365dbb07f1afd9efaa into 9d1a802849b3438fb7a44f6576ecb208(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:26:33,520 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:33,520 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/C, priority=13, startTime=1733840793043; duration=0sec 2024-12-10T14:26:33,520 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:33,520 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:C 2024-12-10T14:26:33,536 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/46edc050932c48f2b50215426472573b 2024-12-10T14:26:33,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/771e4e7c07d04e549f280f97e4854341 is 50, key is test_row_0/B:col10/1733840791799/Put/seqid=0 2024-12-10T14:26:33,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742130_1306 (size=12151) 2024-12-10T14:26:33,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T14:26:33,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:33,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:33,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:33,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840853933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:33,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:33,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840853936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:33,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:33,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840853936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:33,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:33,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840853936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:33,949 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/771e4e7c07d04e549f280f97e4854341 2024-12-10T14:26:33,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/66640d3173f142b59d89e6adcb6cba2b is 50, key is test_row_0/C:col10/1733840791799/Put/seqid=0 2024-12-10T14:26:33,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742131_1307 (size=12151) 2024-12-10T14:26:34,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840854037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840854038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840854039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840854039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840854239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840854241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840854241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840854242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,361 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/66640d3173f142b59d89e6adcb6cba2b 2024-12-10T14:26:34,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/46edc050932c48f2b50215426472573b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/46edc050932c48f2b50215426472573b 2024-12-10T14:26:34,369 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/46edc050932c48f2b50215426472573b, entries=150, sequenceid=235, filesize=11.9 K 2024-12-10T14:26:34,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/771e4e7c07d04e549f280f97e4854341 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/771e4e7c07d04e549f280f97e4854341 2024-12-10T14:26:34,374 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/771e4e7c07d04e549f280f97e4854341, entries=150, sequenceid=235, filesize=11.9 K 2024-12-10T14:26:34,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/66640d3173f142b59d89e6adcb6cba2b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/66640d3173f142b59d89e6adcb6cba2b 2024-12-10T14:26:34,378 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/66640d3173f142b59d89e6adcb6cba2b, entries=150, sequenceid=235, filesize=11.9 K 2024-12-10T14:26:34,379 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 718b511ced67f3365dbb07f1afd9efaa in 1252ms, sequenceid=235, compaction requested=false 2024-12-10T14:26:34,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:34,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:34,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-10T14:26:34,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-10T14:26:34,382 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-10T14:26:34,382 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6270 sec 2024-12-10T14:26:34,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 2.6320 sec 2024-12-10T14:26:34,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:34,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-10T14:26:34,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:34,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:34,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:34,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:34,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:34,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:34,551 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/e16251b370484f189bfda0b8d1a1158e is 50, key is test_row_0/A:col10/1733840794544/Put/seqid=0 2024-12-10T14:26:34,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742132_1308 (size=12151) 2024-12-10T14:26:34,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840854561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840854562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840854562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840854563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840854666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840854666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840854666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840854667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840854868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840854869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840854869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:34,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840854870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:34,956 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/e16251b370484f189bfda0b8d1a1158e 2024-12-10T14:26:34,964 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/520015d93d6d4d02954c5422e21427c0 is 50, key is test_row_0/B:col10/1733840794544/Put/seqid=0 2024-12-10T14:26:34,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742133_1309 (size=12151) 2024-12-10T14:26:35,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:35,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840855171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:35,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:35,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840855172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:35,174 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:35,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840855173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:35,175 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:35,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840855174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:35,368 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/520015d93d6d4d02954c5422e21427c0 2024-12-10T14:26:35,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/747795a556904f149e4c462961359e14 is 50, key is test_row_0/C:col10/1733840794544/Put/seqid=0 2024-12-10T14:26:35,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742134_1310 (size=12151) 2024-12-10T14:26:35,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/747795a556904f149e4c462961359e14 2024-12-10T14:26:35,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/e16251b370484f189bfda0b8d1a1158e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/e16251b370484f189bfda0b8d1a1158e 2024-12-10T14:26:35,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/e16251b370484f189bfda0b8d1a1158e, entries=150, sequenceid=255, filesize=11.9 K 2024-12-10T14:26:35,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/520015d93d6d4d02954c5422e21427c0 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/520015d93d6d4d02954c5422e21427c0 2024-12-10T14:26:35,400 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/520015d93d6d4d02954c5422e21427c0, entries=150, sequenceid=255, filesize=11.9 K 2024-12-10T14:26:35,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/747795a556904f149e4c462961359e14 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/747795a556904f149e4c462961359e14 2024-12-10T14:26:35,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/747795a556904f149e4c462961359e14, entries=150, sequenceid=255, filesize=11.9 K 2024-12-10T14:26:35,406 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 718b511ced67f3365dbb07f1afd9efaa in 860ms, sequenceid=255, compaction requested=true 2024-12-10T14:26:35,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:35,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:26:35,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:35,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:26:35,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:35,406 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:35,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:26:35,406 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:35,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:35,407 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:35,408 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 
718b511ced67f3365dbb07f1afd9efaa/B is initiating minor compaction (all files) 2024-12-10T14:26:35,408 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/B in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:35,408 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/8eba74751fbd4895bc4fe5d6efac4efe, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/771e4e7c07d04e549f280f97e4854341, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/520015d93d6d4d02954c5422e21427c0] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=36.1 K 2024-12-10T14:26:35,408 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:35,408 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/A is initiating minor compaction (all files) 2024-12-10T14:26:35,408 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/A in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
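The "Exploring compaction algorithm has selected 3 files of size 36965 ... with 1 in ratio" lines above refer to a ratio test on the candidate store files. As a rough, illustrative sketch only (hypothetical class and method names, not the actual ExploringCompactionPolicy code): a candidate set passes when no single file is larger than the configured ratio (in the spirit of hbase.hstore.compaction.ratio, commonly around 1.2) times the combined size of the other files in the set.

import java.util.List;

// Illustrative sketch of a "files in ratio" check; not the real HBase policy code.
final class RatioCheckSketch {
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            // A file is "out of ratio" if it dwarfs the rest of the candidate set.
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Three sizes chosen to sum to the 36965 bytes reported above; the exact split is illustrative.
        System.out.println(filesInRatio(List.of(12663L, 12151L, 12151L), 1.2));   // true  -> compact all three
        System.out.println(filesInRatio(List.of(120000L, 12151L, 12151L), 1.2));  // false -> skip this combination
    }
}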
2024-12-10T14:26:35,408 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/7c9eafefec8846c2b209dd39a29ee0be, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/46edc050932c48f2b50215426472573b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/e16251b370484f189bfda0b8d1a1158e] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=36.1 K 2024-12-10T14:26:35,409 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 8eba74751fbd4895bc4fe5d6efac4efe, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733840791173 2024-12-10T14:26:35,409 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c9eafefec8846c2b209dd39a29ee0be, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733840791173 2024-12-10T14:26:35,410 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 771e4e7c07d04e549f280f97e4854341, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733840791798 2024-12-10T14:26:35,410 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46edc050932c48f2b50215426472573b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733840791798 2024-12-10T14:26:35,410 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting e16251b370484f189bfda0b8d1a1158e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733840793932 2024-12-10T14:26:35,410 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 520015d93d6d4d02954c5422e21427c0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733840793932 2024-12-10T14:26:35,419 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#A#compaction#258 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:35,419 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#B#compaction#259 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:35,420 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/b59cba189ed1491b87e8c811f813add0 is 50, key is test_row_0/A:col10/1733840794544/Put/seqid=0 2024-12-10T14:26:35,421 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/37ae71f05f3a4c5dbc76e7108152335b is 50, key is test_row_0/B:col10/1733840794544/Put/seqid=0 2024-12-10T14:26:35,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742136_1312 (size=12765) 2024-12-10T14:26:35,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742135_1311 (size=12765) 2024-12-10T14:26:35,440 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/b59cba189ed1491b87e8c811f813add0 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/b59cba189ed1491b87e8c811f813add0 2024-12-10T14:26:35,445 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/A of 718b511ced67f3365dbb07f1afd9efaa into b59cba189ed1491b87e8c811f813add0(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
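The PressureAwareThroughputController lines above report an average of 6.55 MB/second against a 50.00 MB/second total limit, with zero sleeps. A minimal sketch of that style of rate limiting is shown below; the names and structure are hypothetical and this is not the real controller API, only the idea that a writer is paused whenever its observed rate exceeds the limit.

// Minimal sketch of rate-based throttling in the spirit of the
// "average throughput ... total limit is 50.00 MB/second" lines; hypothetical API.
final class ThroughputThrottleSketch {
    private final double limitBytesPerSec;
    private long bytesWritten;
    private final long startNanos = System.nanoTime();

    ThroughputThrottleSketch(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    // Record 'len' bytes written and sleep just long enough to stay under the limit.
    void control(long len) throws InterruptedException {
        bytesWritten += len;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minElapsedSec = bytesWritten / limitBytesPerSec; // time the writes should have taken
        long sleepMs = (long) ((minElapsedSec - elapsedSec) * 1000);
        if (sleepMs > 0) {
            Thread.sleep(sleepMs); // corresponds to the "slept N time(s)" count in the log
        }
    }
}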
2024-12-10T14:26:35,445 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:35,445 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/A, priority=13, startTime=1733840795406; duration=0sec 2024-12-10T14:26:35,445 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:35,445 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:A 2024-12-10T14:26:35,445 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:35,446 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:35,446 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/C is initiating minor compaction (all files) 2024-12-10T14:26:35,446 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/C in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:35,446 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/9d1a802849b3438fb7a44f6576ecb208, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/66640d3173f142b59d89e6adcb6cba2b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/747795a556904f149e4c462961359e14] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=36.1 K 2024-12-10T14:26:35,447 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d1a802849b3438fb7a44f6576ecb208, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733840791173 2024-12-10T14:26:35,447 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66640d3173f142b59d89e6adcb6cba2b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733840791798 2024-12-10T14:26:35,447 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 747795a556904f149e4c462961359e14, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733840793932 2024-12-10T14:26:35,454 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#C#compaction#260 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:35,454 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/1ec81a0774b540dda6fac501991668c7 is 50, key is test_row_0/C:col10/1733840794544/Put/seqid=0 2024-12-10T14:26:35,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742137_1313 (size=12765) 2024-12-10T14:26:35,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:35,677 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-10T14:26:35,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:35,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:35,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:35,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:35,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:35,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:35,682 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/f207c717dfd84f3f85937ad2cdfb5ecb is 50, key is test_row_0/A:col10/1733840795675/Put/seqid=0 2024-12-10T14:26:35,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742138_1314 (size=12301) 2024-12-10T14:26:35,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:35,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:35,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840855689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:35,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840855689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:35,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:35,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840855690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:35,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:35,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840855691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:35,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:35,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840855794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:35,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:35,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840855794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:35,796 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:35,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840855794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:35,796 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:35,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840855794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:35,838 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/37ae71f05f3a4c5dbc76e7108152335b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/37ae71f05f3a4c5dbc76e7108152335b 2024-12-10T14:26:35,845 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/B of 718b511ced67f3365dbb07f1afd9efaa into 37ae71f05f3a4c5dbc76e7108152335b(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
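The repeated RegionTooBusyException traces come from HRegion.checkResources rejecting puts while the region's memstore is over its blocking limit (512.0 K here, since the test runs with a very small flush size). The sketch below is a simplified stand-in for that kind of guard, assuming the usual relationship blocking limit = flush size x block multiplier (hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier); it is not the actual HRegion implementation and is not thread-safe.

import java.io.IOException;

// Simplified, single-threaded sketch of a memstore back-pressure guard.
final class MemStoreGuardSketch {
    private final long blockingMemStoreSize; // e.g. flushSize * blockMultiplier
    private long memStoreDataSize;

    MemStoreGuardSketch(long flushSize, int blockMultiplier) {
        this.blockingMemStoreSize = flushSize * blockMultiplier;
    }

    void checkResources(String regionName) throws IOException {
        if (memStoreDataSize > blockingMemStoreSize) {
            // Ask for a flush, then push back on the writer; the client is expected to retry.
            requestFlush();
            throw new IOException("Over memstore limit=" + blockingMemStoreSize
                + ", regionName=" + regionName); // stands in for RegionTooBusyException
        }
    }

    void add(long cellSize) { memStoreDataSize += cellSize; }
    void flushed() { memStoreDataSize = 0; }
    private void requestFlush() { /* hand off to a flusher thread, as MemStoreFlusher.0 does above */ }
}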
2024-12-10T14:26:35,845 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:35,845 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/B, priority=13, startTime=1733840795406; duration=0sec 2024-12-10T14:26:35,845 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:35,845 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:B 2024-12-10T14:26:35,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T14:26:35,857 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-10T14:26:35,858 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:26:35,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-12-10T14:26:35,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T14:26:35,859 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:26:35,860 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:26:35,860 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:26:35,865 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/1ec81a0774b540dda6fac501991668c7 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/1ec81a0774b540dda6fac501991668c7 2024-12-10T14:26:35,869 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/C of 718b511ced67f3365dbb07f1afd9efaa into 1ec81a0774b540dda6fac501991668c7(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
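On the client side, the callers behind the increasing callIds (151, 156, 158, 160, ...) appear to resubmit their Mutate calls until the region accepts the write or the deadline passes. The sketch below shows that pattern with the standard HBase client API; the table name, row, and column family are borrowed from the test for illustration, the backoff numbers are arbitrary, and the HBase client already performs similar retries internally.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a writer that backs off and retries when the region pushes back
// (e.g. RegionTooBusyException surfacing as an IOException).
public class BusyRegionWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; attempt <= 10; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (IOException busy) {
                    // Region is flushing/compacting; wait a bit longer each attempt, then retry.
                    Thread.sleep(100L * attempt);
                }
            }
        }
    }
}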
2024-12-10T14:26:35,869 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:35,869 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/C, priority=13, startTime=1733840795406; duration=0sec 2024-12-10T14:26:35,869 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:35,869 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:C 2024-12-10T14:26:35,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T14:26:35,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:35,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840855997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:35,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:35,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840855997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:35,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:36,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840855998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:36,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:36,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840855998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:36,013 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:36,013 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-10T14:26:36,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:36,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:36,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:36,014 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:36,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:36,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:36,086 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/f207c717dfd84f3f85937ad2cdfb5ecb 2024-12-10T14:26:36,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/71289fdb02eb429da57d394ced202fab is 50, key is test_row_0/B:col10/1733840795675/Put/seqid=0 2024-12-10T14:26:36,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742139_1315 (size=12301) 2024-12-10T14:26:36,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T14:26:36,166 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:36,166 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-10T14:26:36,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:36,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
as already flushing 2024-12-10T14:26:36,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:36,166 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:36,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:36,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:36,300 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:36,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840856299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:36,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:36,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840856300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:36,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:36,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840856301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:36,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:36,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840856301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:36,319 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:36,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-10T14:26:36,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:36,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:36,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:36,319 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:36,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:36,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:36,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T14:26:36,472 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:36,472 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-10T14:26:36,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:36,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:36,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:36,473 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:36,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:36,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:36,498 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/71289fdb02eb429da57d394ced202fab 2024-12-10T14:26:36,505 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/eaf2dc5b3f114efd9ac802242ffb6f74 is 50, key is test_row_0/C:col10/1733840795675/Put/seqid=0 2024-12-10T14:26:36,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742140_1316 (size=12301) 2024-12-10T14:26:36,625 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:36,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-10T14:26:36,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:36,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:36,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:36,626 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:36,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:36,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:36,778 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:36,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-10T14:26:36,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:36,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:36,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:36,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:36,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:36,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:36,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:36,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840856803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:36,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:36,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840856805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:36,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:36,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840856806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:36,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:36,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840856807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:36,910 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/eaf2dc5b3f114efd9ac802242ffb6f74 2024-12-10T14:26:36,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/f207c717dfd84f3f85937ad2cdfb5ecb as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/f207c717dfd84f3f85937ad2cdfb5ecb 2024-12-10T14:26:36,919 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/f207c717dfd84f3f85937ad2cdfb5ecb, entries=150, sequenceid=275, filesize=12.0 K 2024-12-10T14:26:36,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/71289fdb02eb429da57d394ced202fab as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/71289fdb02eb429da57d394ced202fab 2024-12-10T14:26:36,923 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/71289fdb02eb429da57d394ced202fab, entries=150, sequenceid=275, filesize=12.0 K 2024-12-10T14:26:36,924 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/eaf2dc5b3f114efd9ac802242ffb6f74 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/eaf2dc5b3f114efd9ac802242ffb6f74 2024-12-10T14:26:36,928 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/eaf2dc5b3f114efd9ac802242ffb6f74, entries=150, sequenceid=275, filesize=12.0 K 2024-12-10T14:26:36,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 718b511ced67f3365dbb07f1afd9efaa in 1252ms, sequenceid=275, compaction requested=false 2024-12-10T14:26:36,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:36,930 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:36,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-10T14:26:36,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:36,931 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-10T14:26:36,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:36,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:36,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:36,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:36,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:36,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:36,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/271a27d19f914208b8831d689e98098b is 50, key is test_row_0/A:col10/1733840795689/Put/seqid=0 2024-12-10T14:26:36,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742141_1317 (size=12301) 2024-12-10T14:26:36,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=83 2024-12-10T14:26:37,340 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/271a27d19f914208b8831d689e98098b 2024-12-10T14:26:37,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/323484acf71b48069c0a2a82e50a641c is 50, key is test_row_0/B:col10/1733840795689/Put/seqid=0 2024-12-10T14:26:37,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742142_1318 (size=12301) 2024-12-10T14:26:37,751 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/323484acf71b48069c0a2a82e50a641c 2024-12-10T14:26:37,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/e392d8beb38a4f93bd6468fce5958f3d is 50, key is test_row_0/C:col10/1733840795689/Put/seqid=0 2024-12-10T14:26:37,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742143_1319 (size=12301) 2024-12-10T14:26:37,763 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/e392d8beb38a4f93bd6468fce5958f3d 2024-12-10T14:26:37,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/271a27d19f914208b8831d689e98098b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/271a27d19f914208b8831d689e98098b 2024-12-10T14:26:37,770 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/271a27d19f914208b8831d689e98098b, entries=150, sequenceid=294, filesize=12.0 K 2024-12-10T14:26:37,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/323484acf71b48069c0a2a82e50a641c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/323484acf71b48069c0a2a82e50a641c 2024-12-10T14:26:37,779 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/323484acf71b48069c0a2a82e50a641c, entries=150, sequenceid=294, filesize=12.0 K 2024-12-10T14:26:37,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/e392d8beb38a4f93bd6468fce5958f3d as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/e392d8beb38a4f93bd6468fce5958f3d 2024-12-10T14:26:37,784 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/e392d8beb38a4f93bd6468fce5958f3d, entries=150, sequenceid=294, filesize=12.0 K 2024-12-10T14:26:37,785 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=0 B/0 for 718b511ced67f3365dbb07f1afd9efaa in 854ms, sequenceid=294, compaction requested=true 2024-12-10T14:26:37,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:37,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:37,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-12-10T14:26:37,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-12-10T14:26:37,787 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-10T14:26:37,787 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9260 sec 2024-12-10T14:26:37,789 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.9300 sec 2024-12-10T14:26:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:37,817 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T14:26:37,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:37,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:37,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:37,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:37,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:37,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:37,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/550d1fe58a5a4dc39195e0e0bdf5088b is 50, key is test_row_0/A:col10/1733840797816/Put/seqid=0 2024-12-10T14:26:37,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:37,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840857837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:37,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:37,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840857838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:37,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:37,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840857839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:37,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:37,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840857840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:37,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742144_1320 (size=17181) 2024-12-10T14:26:37,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:37,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840857941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:37,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:37,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840857942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:37,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:37,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840857944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:37,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:37,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840857944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:37,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T14:26:37,963 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-10T14:26:37,964 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:26:37,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-12-10T14:26:37,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-10T14:26:37,966 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:26:37,966 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:26:37,967 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:26:38,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=85 2024-12-10T14:26:38,118 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:38,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-10T14:26:38,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:38,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:38,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:38,119 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:38,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:38,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840858143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:38,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840858144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:38,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840858146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:38,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840858147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:38,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/550d1fe58a5a4dc39195e0e0bdf5088b 2024-12-10T14:26:38,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/b1ba5763fb114574a7079a4ee8dcfcbf is 50, key is test_row_0/B:col10/1733840797816/Put/seqid=0 2024-12-10T14:26:38,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742145_1321 (size=12301) 2024-12-10T14:26:38,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-10T14:26:38,271 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:38,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-10T14:26:38,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:38,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:38,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:38,272 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:38,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:38,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:38,424 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:38,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-10T14:26:38,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:38,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:38,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:38,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:38,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:38,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:38,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:38,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840858446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:38,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:38,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840858447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:38,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:38,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840858449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:38,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:38,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840858451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:38,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:38,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57500 deadline: 1733840858478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:38,481 DEBUG [Thread-1181 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18220 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., hostname=db1d50717577,46699,1733840717757, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T14:26:38,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-10T14:26:38,577 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:38,577 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-10T14:26:38,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:38,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:38,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:38,578 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:38,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:38,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:38,659 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/b1ba5763fb114574a7079a4ee8dcfcbf 2024-12-10T14:26:38,666 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/7baa0f69801c45d9b2d4b9b4dc6edb73 is 50, key is test_row_0/C:col10/1733840797816/Put/seqid=0 2024-12-10T14:26:38,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742146_1322 (size=12301) 2024-12-10T14:26:38,730 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:38,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-10T14:26:38,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:38,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:38,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:38,731 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:38,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:38,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:38,883 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:38,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-10T14:26:38,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:38,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:38,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:38,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:38,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:38,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:38,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:38,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840858949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:38,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:38,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840858951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:38,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840858952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:38,955 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:38,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840858954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:39,036 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:39,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-10T14:26:39,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:39,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:39,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:39,037 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:39,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:39,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-10T14:26:39,079 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/7baa0f69801c45d9b2d4b9b4dc6edb73 2024-12-10T14:26:39,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/550d1fe58a5a4dc39195e0e0bdf5088b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/550d1fe58a5a4dc39195e0e0bdf5088b 2024-12-10T14:26:39,087 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/550d1fe58a5a4dc39195e0e0bdf5088b, entries=250, sequenceid=306, filesize=16.8 K 2024-12-10T14:26:39,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/b1ba5763fb114574a7079a4ee8dcfcbf as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/b1ba5763fb114574a7079a4ee8dcfcbf 2024-12-10T14:26:39,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/b1ba5763fb114574a7079a4ee8dcfcbf, entries=150, sequenceid=306, filesize=12.0 K 2024-12-10T14:26:39,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/7baa0f69801c45d9b2d4b9b4dc6edb73 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/7baa0f69801c45d9b2d4b9b4dc6edb73 2024-12-10T14:26:39,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/7baa0f69801c45d9b2d4b9b4dc6edb73, entries=150, sequenceid=306, filesize=12.0 K 2024-12-10T14:26:39,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 718b511ced67f3365dbb07f1afd9efaa in 1279ms, sequenceid=306, compaction requested=true 2024-12-10T14:26:39,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:39,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:26:39,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:39,096 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:26:39,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:26:39,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:39,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:26:39,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:39,096 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:26:39,099 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49668 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:26:39,099 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/B is initiating minor compaction (all files) 2024-12-10T14:26:39,099 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/B in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:39,099 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/37ae71f05f3a4c5dbc76e7108152335b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/71289fdb02eb429da57d394ced202fab, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/323484acf71b48069c0a2a82e50a641c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/b1ba5763fb114574a7079a4ee8dcfcbf] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=48.5 K 2024-12-10T14:26:39,099 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54548 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:26:39,099 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/A is initiating minor compaction (all files) 2024-12-10T14:26:39,099 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/A in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:39,099 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 37ae71f05f3a4c5dbc76e7108152335b, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733840793932 2024-12-10T14:26:39,099 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/b59cba189ed1491b87e8c811f813add0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/f207c717dfd84f3f85937ad2cdfb5ecb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/271a27d19f914208b8831d689e98098b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/550d1fe58a5a4dc39195e0e0bdf5088b] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=53.3 K 2024-12-10T14:26:39,100 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b59cba189ed1491b87e8c811f813add0, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733840793932 2024-12-10T14:26:39,100 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 71289fdb02eb429da57d394ced202fab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, 
earliestPutTs=1733840794561 2024-12-10T14:26:39,100 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting f207c717dfd84f3f85937ad2cdfb5ecb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1733840794561 2024-12-10T14:26:39,100 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 323484acf71b48069c0a2a82e50a641c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733840795685 2024-12-10T14:26:39,101 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting b1ba5763fb114574a7079a4ee8dcfcbf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733840797815 2024-12-10T14:26:39,101 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 271a27d19f914208b8831d689e98098b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733840795685 2024-12-10T14:26:39,101 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 550d1fe58a5a4dc39195e0e0bdf5088b, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733840797813 2024-12-10T14:26:39,112 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#A#compaction#270 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:39,113 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/3b0aa4c48a0846859450a94bc63357ba is 50, key is test_row_0/A:col10/1733840797816/Put/seqid=0 2024-12-10T14:26:39,123 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#B#compaction#271 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:39,123 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/016dac6040234113bccaca86d50b975d is 50, key is test_row_0/B:col10/1733840797816/Put/seqid=0 2024-12-10T14:26:39,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742147_1323 (size=13051) 2024-12-10T14:26:39,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742148_1324 (size=13051) 2024-12-10T14:26:39,189 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:39,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-10T14:26:39,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:39,189 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T14:26:39,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:39,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:39,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:39,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:39,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:39,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:39,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/32e22d0931f14fb5a96189e10a09d46e is 50, key is test_row_0/A:col10/1733840797838/Put/seqid=0 2024-12-10T14:26:39,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742149_1325 
(size=12301) 2024-12-10T14:26:39,202 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/32e22d0931f14fb5a96189e10a09d46e 2024-12-10T14:26:39,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/27212b1c28b0456284ccb5cf963639aa is 50, key is test_row_0/B:col10/1733840797838/Put/seqid=0 2024-12-10T14:26:39,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742150_1326 (size=12301) 2024-12-10T14:26:39,560 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/3b0aa4c48a0846859450a94bc63357ba as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/3b0aa4c48a0846859450a94bc63357ba 2024-12-10T14:26:39,564 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/A of 718b511ced67f3365dbb07f1afd9efaa into 3b0aa4c48a0846859450a94bc63357ba(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:26:39,564 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:39,564 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/A, priority=12, startTime=1733840799096; duration=0sec 2024-12-10T14:26:39,565 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:39,565 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:A 2024-12-10T14:26:39,565 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:26:39,566 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49668 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:26:39,566 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/C is initiating minor compaction (all files) 2024-12-10T14:26:39,566 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/C in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:39,566 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/1ec81a0774b540dda6fac501991668c7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/eaf2dc5b3f114efd9ac802242ffb6f74, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/e392d8beb38a4f93bd6468fce5958f3d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/7baa0f69801c45d9b2d4b9b4dc6edb73] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=48.5 K 2024-12-10T14:26:39,566 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ec81a0774b540dda6fac501991668c7, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733840793932 2024-12-10T14:26:39,567 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting eaf2dc5b3f114efd9ac802242ffb6f74, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1733840794561 2024-12-10T14:26:39,567 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting e392d8beb38a4f93bd6468fce5958f3d, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1733840795685 2024-12-10T14:26:39,568 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7baa0f69801c45d9b2d4b9b4dc6edb73, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733840797815 2024-12-10T14:26:39,574 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/016dac6040234113bccaca86d50b975d as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/016dac6040234113bccaca86d50b975d 2024-12-10T14:26:39,579 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/B of 718b511ced67f3365dbb07f1afd9efaa into 016dac6040234113bccaca86d50b975d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:26:39,579 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:39,579 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/B, priority=12, startTime=1733840799096; duration=0sec 2024-12-10T14:26:39,579 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:39,579 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:B 2024-12-10T14:26:39,598 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#C#compaction#274 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:39,598 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/91ee1f448cc94895afc1820ef80c661a is 50, key is test_row_0/C:col10/1733840797816/Put/seqid=0 2024-12-10T14:26:39,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742151_1327 (size=13051) 2024-12-10T14:26:39,615 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/27212b1c28b0456284ccb5cf963639aa 2024-12-10T14:26:39,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/f66a8555521f48279380286675e8321e is 50, key is test_row_0/C:col10/1733840797838/Put/seqid=0 2024-12-10T14:26:39,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742152_1328 (size=12301) 2024-12-10T14:26:39,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:39,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:39,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:39,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840859961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:39,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:39,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840859962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:39,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:39,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840859962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:39,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:39,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840859962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,008 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/91ee1f448cc94895afc1820ef80c661a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/91ee1f448cc94895afc1820ef80c661a 2024-12-10T14:26:40,012 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/C of 718b511ced67f3365dbb07f1afd9efaa into 91ee1f448cc94895afc1820ef80c661a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:26:40,012 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:40,013 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/C, priority=12, startTime=1733840799096; duration=0sec 2024-12-10T14:26:40,013 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:40,013 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:C 2024-12-10T14:26:40,026 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/f66a8555521f48279380286675e8321e 2024-12-10T14:26:40,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/32e22d0931f14fb5a96189e10a09d46e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/32e22d0931f14fb5a96189e10a09d46e 2024-12-10T14:26:40,033 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/32e22d0931f14fb5a96189e10a09d46e, entries=150, sequenceid=331, filesize=12.0 K 2024-12-10T14:26:40,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/27212b1c28b0456284ccb5cf963639aa as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/27212b1c28b0456284ccb5cf963639aa 2024-12-10T14:26:40,037 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/27212b1c28b0456284ccb5cf963639aa, entries=150, sequenceid=331, filesize=12.0 K 2024-12-10T14:26:40,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/f66a8555521f48279380286675e8321e 
as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/f66a8555521f48279380286675e8321e 2024-12-10T14:26:40,042 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/f66a8555521f48279380286675e8321e, entries=150, sequenceid=331, filesize=12.0 K 2024-12-10T14:26:40,043 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 718b511ced67f3365dbb07f1afd9efaa in 854ms, sequenceid=331, compaction requested=false 2024-12-10T14:26:40,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:40,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:40,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-12-10T14:26:40,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-12-10T14:26:40,046 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-10T14:26:40,046 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0780 sec 2024-12-10T14:26:40,048 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 2.0820 sec 2024-12-10T14:26:40,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:40,067 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-10T14:26:40,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:40,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:40,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:40,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:40,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:40,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:40,070 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-10T14:26:40,070 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-10T14:26:40,071 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:26:40,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-12-10T14:26:40,073 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:26:40,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-10T14:26:40,073 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:26:40,073 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:26:40,074 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/5491a21a9ae14d6e9b933952625e926e is 50, key is test_row_0/A:col10/1733840799954/Put/seqid=0 2024-12-10T14:26:40,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742153_1329 (size=12301) 2024-12-10T14:26:40,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:40,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840860090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:40,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840860094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:40,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840860094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:40,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840860094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-10T14:26:40,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:40,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840860195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:40,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840860198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:40,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840860198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:40,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840860198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,217 DEBUG [Thread-1186 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e998dd3 to 127.0.0.1:58494 2024-12-10T14:26:40,217 DEBUG [Thread-1184 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x088aa519 to 127.0.0.1:58494 2024-12-10T14:26:40,217 DEBUG [Thread-1184 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:40,217 DEBUG [Thread-1186 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:40,217 DEBUG [Thread-1188 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e4c79b8 to 127.0.0.1:58494 2024-12-10T14:26:40,217 DEBUG [Thread-1188 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:40,218 DEBUG [Thread-1190 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d1403c3 to 127.0.0.1:58494 2024-12-10T14:26:40,219 DEBUG [Thread-1190 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:40,219 DEBUG [Thread-1192 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3bf0ba59 to 127.0.0.1:58494 2024-12-10T14:26:40,219 DEBUG [Thread-1192 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:40,226 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:40,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-10T14:26:40,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:40,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:40,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:40,226 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:40,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:40,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:40,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-10T14:26:40,378 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:40,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-10T14:26:40,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:40,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:40,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:40,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:40,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:40,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:40,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:40,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840860399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:40,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840860401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:40,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840860402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:40,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840860402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,478 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/5491a21a9ae14d6e9b933952625e926e 2024-12-10T14:26:40,484 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/06b397ce16ff42d19715bb77066e6bad is 50, key is test_row_0/B:col10/1733840799954/Put/seqid=0 2024-12-10T14:26:40,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742154_1330 (size=12301) 2024-12-10T14:26:40,530 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:40,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-10T14:26:40,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:40,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:40,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:40,531 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:40,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:40,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:40,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-10T14:26:40,683 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:40,683 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-10T14:26:40,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:40,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:40,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:40,684 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:40,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:40,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:40,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:40,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840860701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:40,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840860704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840860705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840860705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:40,835 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:40,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-10T14:26:40,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:40,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:40,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:40,836 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:40,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:40,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:40,888 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/06b397ce16ff42d19715bb77066e6bad 2024-12-10T14:26:40,894 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/e8a6a67680e141f498e081ef62d3422e is 50, key is test_row_0/C:col10/1733840799954/Put/seqid=0 2024-12-10T14:26:40,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742155_1331 (size=12301) 2024-12-10T14:26:40,988 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:40,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-10T14:26:40,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:40,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:40,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:40,989 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:40,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:40,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:41,141 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:41,141 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-10T14:26:41,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:41,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:41,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:41,141 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:41,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:41,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:41,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-10T14:26:41,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:41,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57516 deadline: 1733840861203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:41,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:41,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57486 deadline: 1733840861208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:41,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:41,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57436 deadline: 1733840861208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:41,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:41,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57482 deadline: 1733840861209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:41,293 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:41,294 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-10T14:26:41,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:41,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:41,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:41,294 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:41,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:41,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:41,298 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/e8a6a67680e141f498e081ef62d3422e 2024-12-10T14:26:41,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/5491a21a9ae14d6e9b933952625e926e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/5491a21a9ae14d6e9b933952625e926e 2024-12-10T14:26:41,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/5491a21a9ae14d6e9b933952625e926e, entries=150, sequenceid=349, filesize=12.0 K 2024-12-10T14:26:41,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/06b397ce16ff42d19715bb77066e6bad as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/06b397ce16ff42d19715bb77066e6bad 2024-12-10T14:26:41,308 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/06b397ce16ff42d19715bb77066e6bad, entries=150, sequenceid=349, filesize=12.0 K 2024-12-10T14:26:41,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/e8a6a67680e141f498e081ef62d3422e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/e8a6a67680e141f498e081ef62d3422e 2024-12-10T14:26:41,311 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/e8a6a67680e141f498e081ef62d3422e, entries=150, sequenceid=349, filesize=12.0 K 2024-12-10T14:26:41,311 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=134.18 KB/137400 for 718b511ced67f3365dbb07f1afd9efaa in 1244ms, sequenceid=349, compaction requested=true 2024-12-10T14:26:41,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:41,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
718b511ced67f3365dbb07f1afd9efaa:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:26:41,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:41,312 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:41,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:26:41,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:41,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 718b511ced67f3365dbb07f1afd9efaa:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:26:41,312 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:41,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:41,313 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:41,313 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:41,313 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/B is initiating minor compaction (all files) 2024-12-10T14:26:41,313 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/A is initiating minor compaction (all files) 2024-12-10T14:26:41,313 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/B in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:41,313 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/A in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
2024-12-10T14:26:41,313 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/016dac6040234113bccaca86d50b975d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/27212b1c28b0456284ccb5cf963639aa, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/06b397ce16ff42d19715bb77066e6bad] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=36.8 K 2024-12-10T14:26:41,313 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/3b0aa4c48a0846859450a94bc63357ba, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/32e22d0931f14fb5a96189e10a09d46e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/5491a21a9ae14d6e9b933952625e926e] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=36.8 K 2024-12-10T14:26:41,313 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 016dac6040234113bccaca86d50b975d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733840797815 2024-12-10T14:26:41,313 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b0aa4c48a0846859450a94bc63357ba, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733840797815 2024-12-10T14:26:41,313 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 27212b1c28b0456284ccb5cf963639aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733840797837 2024-12-10T14:26:41,314 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32e22d0931f14fb5a96189e10a09d46e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733840797837 2024-12-10T14:26:41,314 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 06b397ce16ff42d19715bb77066e6bad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1733840799954 2024-12-10T14:26:41,314 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5491a21a9ae14d6e9b933952625e926e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1733840799954 2024-12-10T14:26:41,320 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#B#compaction#279 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:41,320 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/7c97bb7ae82648c09a6ad68cde43e629 is 50, key is test_row_0/B:col10/1733840799954/Put/seqid=0 2024-12-10T14:26:41,321 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#A#compaction#280 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:41,321 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/385141e6224342bfb61d0e354aa36538 is 50, key is test_row_0/A:col10/1733840799954/Put/seqid=0 2024-12-10T14:26:41,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742156_1332 (size=13153) 2024-12-10T14:26:41,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742157_1333 (size=13153) 2024-12-10T14:26:41,446 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:41,446 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-10T14:26:41,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
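Editor's note: the PressureAwareThroughputController entries above report each compaction's average throughput against a "total limit" of 50.00 MB/second. That limit is configurable; below is a minimal sketch of adjusting the pressure-aware controller's bounds. The key names are the usual properties for this controller, but treat the exact names and defaults as an assumption to verify against your HBase version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Bound applied when the store is under heavy flush/compaction pressure.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            // Bound applied under low pressure; 50 MB/s matches the "total limit" in the log above.
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            System.out.println("lower bound (bytes/s) = "
                + conf.getLong("hbase.hstore.compaction.throughput.lower.bound", -1L));
        }
    }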
2024-12-10T14:26:41,446 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T14:26:41,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:41,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:41,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:41,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:41,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:41,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:41,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/d297c9054fa6425d923c7e8b214db800 is 50, key is test_row_0/A:col10/1733840800093/Put/seqid=0 2024-12-10T14:26:41,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742158_1334 (size=12301) 2024-12-10T14:26:41,728 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/7c97bb7ae82648c09a6ad68cde43e629 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7c97bb7ae82648c09a6ad68cde43e629 2024-12-10T14:26:41,728 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/385141e6224342bfb61d0e354aa36538 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/385141e6224342bfb61d0e354aa36538 2024-12-10T14:26:41,732 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/A of 718b511ced67f3365dbb07f1afd9efaa into 385141e6224342bfb61d0e354aa36538(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
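Editor's note: the flush above (pid=88, FlushRegionCallable, all 3 column families) is executed on the region server on behalf of a master flush procedure that a client requested. From client code, the same table-level flush is a single Admin call; a minimal sketch follows, where the ZooKeeper quorum and client port are the ones from this test run and would differ in any real deployment:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");              // from this test run
            conf.set("hbase.zookeeper.property.clientPort", "58494");     // from this test run
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // The master turns this into a FlushTableProcedure with one
                // FlushRegionProcedure per region, as seen in the surrounding log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }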
2024-12-10T14:26:41,732 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/B of 718b511ced67f3365dbb07f1afd9efaa into 7c97bb7ae82648c09a6ad68cde43e629(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:26:41,732 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:41,732 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/A, priority=13, startTime=1733840801312; duration=0sec 2024-12-10T14:26:41,732 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:41,732 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/B, priority=13, startTime=1733840801312; duration=0sec 2024-12-10T14:26:41,732 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:41,732 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:A 2024-12-10T14:26:41,732 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:41,732 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:41,732 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:B 2024-12-10T14:26:41,733 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:41,733 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): 718b511ced67f3365dbb07f1afd9efaa/C is initiating minor compaction (all files) 2024-12-10T14:26:41,733 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 718b511ced67f3365dbb07f1afd9efaa/C in TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
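Editor's note: the minor compactions above were "system" requests queued by MemStoreFlusher.0; a client can also request compaction explicitly through the Admin API. A minimal sketch, reusing the connection settings from this test run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "58494");
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                admin.compact(table);       // queue a minor compaction for every region/store
                admin.majorCompact(table);  // or rewrite all store files in each store
            }
        }
    }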
2024-12-10T14:26:41,733 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/91ee1f448cc94895afc1820ef80c661a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/f66a8555521f48279380286675e8321e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/e8a6a67680e141f498e081ef62d3422e] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp, totalSize=36.8 K 2024-12-10T14:26:41,733 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91ee1f448cc94895afc1820ef80c661a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733840797815 2024-12-10T14:26:41,733 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting f66a8555521f48279380286675e8321e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733840797837 2024-12-10T14:26:41,733 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8a6a67680e141f498e081ef62d3422e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1733840799954 2024-12-10T14:26:41,739 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 718b511ced67f3365dbb07f1afd9efaa#C#compaction#282 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:41,739 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/6cd8e944d0bf4ddf8f40781f3bdf5d7f is 50, key is test_row_0/C:col10/1733840799954/Put/seqid=0 2024-12-10T14:26:41,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742159_1335 (size=13153) 2024-12-10T14:26:41,854 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/d297c9054fa6425d923c7e8b214db800 2024-12-10T14:26:41,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/9795965d516a447197e39265e17d1d84 is 50, key is test_row_0/B:col10/1733840800093/Put/seqid=0 2024-12-10T14:26:41,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742160_1336 (size=12301) 2024-12-10T14:26:42,147 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/6cd8e944d0bf4ddf8f40781f3bdf5d7f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/6cd8e944d0bf4ddf8f40781f3bdf5d7f 2024-12-10T14:26:42,151 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 718b511ced67f3365dbb07f1afd9efaa/C of 718b511ced67f3365dbb07f1afd9efaa into 6cd8e944d0bf4ddf8f40781f3bdf5d7f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:26:42,151 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:42,151 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa., storeName=718b511ced67f3365dbb07f1afd9efaa/C, priority=13, startTime=1733840801312; duration=0sec 2024-12-10T14:26:42,151 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:42,151 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 718b511ced67f3365dbb07f1afd9efaa:C 2024-12-10T14:26:42,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-10T14:26:42,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:42,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. as already flushing 2024-12-10T14:26:42,211 DEBUG [Thread-1173 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64ee0130 to 127.0.0.1:58494 2024-12-10T14:26:42,211 DEBUG [Thread-1173 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:42,212 DEBUG [Thread-1175 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683b64c3 to 127.0.0.1:58494 2024-12-10T14:26:42,212 DEBUG [Thread-1175 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:42,218 DEBUG [Thread-1177 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x07e55eb7 to 127.0.0.1:58494 2024-12-10T14:26:42,218 DEBUG [Thread-1177 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:42,219 DEBUG [Thread-1179 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x03a703d2 to 127.0.0.1:58494 2024-12-10T14:26:42,219 DEBUG [Thread-1179 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:42,263 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/9795965d516a447197e39265e17d1d84 2024-12-10T14:26:42,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/87aab811e1914092a274049c32f66604 is 50, key is test_row_0/C:col10/1733840800093/Put/seqid=0 2024-12-10T14:26:42,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742161_1337 (size=12301) 2024-12-10T14:26:42,673 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=44.73 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/87aab811e1914092a274049c32f66604 2024-12-10T14:26:42,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/d297c9054fa6425d923c7e8b214db800 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/d297c9054fa6425d923c7e8b214db800 2024-12-10T14:26:42,679 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/d297c9054fa6425d923c7e8b214db800, entries=150, sequenceid=372, filesize=12.0 K 2024-12-10T14:26:42,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/9795965d516a447197e39265e17d1d84 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/9795965d516a447197e39265e17d1d84 2024-12-10T14:26:42,683 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/9795965d516a447197e39265e17d1d84, entries=150, sequenceid=372, filesize=12.0 K 2024-12-10T14:26:42,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/87aab811e1914092a274049c32f66604 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/87aab811e1914092a274049c32f66604 2024-12-10T14:26:42,686 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/87aab811e1914092a274049c32f66604, entries=150, sequenceid=372, filesize=12.0 K 2024-12-10T14:26:42,687 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=26.84 KB/27480 for 718b511ced67f3365dbb07f1afd9efaa in 1241ms, sequenceid=372, compaction requested=false 2024-12-10T14:26:42,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): 
Flush status journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:42,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:42,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-10T14:26:42,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-10T14:26:42,688 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-10T14:26:42,688 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6150 sec 2024-12-10T14:26:42,689 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 2.6180 sec 2024-12-10T14:26:44,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-10T14:26:44,177 INFO [Thread-1183 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-12-10T14:26:46,177 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T14:26:48,528 DEBUG [Thread-1181 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14ed1e44 to 127.0.0.1:58494 2024-12-10T14:26:48,528 DEBUG [Thread-1181 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:26:48,528 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-12-10T14:26:48,528 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 71
2024-12-10T14:26:48,528 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 78
2024-12-10T14:26:48,528 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75
2024-12-10T14:26:48,528 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68
2024-12-10T14:26:48,528 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 6
2024-12-10T14:26:48,528 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-10T14:26:48,528 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7756
2024-12-10T14:26:48,528 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7399
2024-12-10T14:26:48,528 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7451
2024-12-10T14:26:48,528 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7750
2024-12-10T14:26:48,528 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7354
2024-12-10T14:26:48,528 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-10T14:26:48,528 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-10T14:26:48,528 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x44645c55 to 127.0.0.1:58494
2024-12-10T14:26:48,528 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-10T14:26:48,529 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-10T14:26:48,529 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-10T14:26:48,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-10T14:26:48,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89
2024-12-10T14:26:48,532 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840808531"}]},"ts":"1733840808531"}
2024-12-10T14:26:48,532 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-10T14:26:48,534 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-10T14:26:48,535 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-10T14:26:48,536 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=718b511ced67f3365dbb07f1afd9efaa, UNASSIGN}]
2024-12-10T14:26:48,536 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=718b511ced67f3365dbb07f1afd9efaa, UNASSIGN
2024-12-10T14:26:48,537 INFO [PEWorker-3 {}]
assignment.RegionStateStore(202): pid=91 updating hbase:meta row=718b511ced67f3365dbb07f1afd9efaa, regionState=CLOSING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:26:48,537 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:26:48,537 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; CloseRegionProcedure 718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:26:48,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-10T14:26:48,688 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:48,689 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(124): Close 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:48,689 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:26:48,689 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1681): Closing 718b511ced67f3365dbb07f1afd9efaa, disabling compactions & flushes 2024-12-10T14:26:48,689 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:48,689 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 2024-12-10T14:26:48,689 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. after waiting 0 ms 2024-12-10T14:26:48,689 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
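Editor's note: the region close above is driven by the DisableTableProcedure (pid=89) that the client requested ("Started disable of TestAcidGuarantees"); on the client side that is one Admin call. A minimal sketch, again using this run's ZooKeeper settings:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "58494");
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Blocks until the DisableTableProcedure and its region-close
                // subprocedures (as logged here) complete.
                admin.disableTable(table);
                System.out.println("disabled = " + admin.isTableDisabled(table));
            }
        }
    }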
2024-12-10T14:26:48,689 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(2837): Flushing 718b511ced67f3365dbb07f1afd9efaa 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-10T14:26:48,689 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=A 2024-12-10T14:26:48,689 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:48,689 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=B 2024-12-10T14:26:48,689 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:48,689 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 718b511ced67f3365dbb07f1afd9efaa, store=C 2024-12-10T14:26:48,689 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:48,693 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/b073e18a3e444648a529bf291528129f is 50, key is test_row_0/A:col10/1733840802217/Put/seqid=0 2024-12-10T14:26:48,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742162_1338 (size=12301) 2024-12-10T14:26:48,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-10T14:26:49,096 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/b073e18a3e444648a529bf291528129f 2024-12-10T14:26:49,102 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/e669de1cefc44e9a8119c7b9eb48726e is 50, key is test_row_0/B:col10/1733840802217/Put/seqid=0 2024-12-10T14:26:49,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742163_1339 (size=12301) 2024-12-10T14:26:49,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-10T14:26:49,505 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 
{event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/e669de1cefc44e9a8119c7b9eb48726e 2024-12-10T14:26:49,511 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/6b280793b9d641d69edf07af0d3a2e2c is 50, key is test_row_0/C:col10/1733840802217/Put/seqid=0 2024-12-10T14:26:49,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742164_1340 (size=12301) 2024-12-10T14:26:49,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-10T14:26:49,915 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/6b280793b9d641d69edf07af0d3a2e2c 2024-12-10T14:26:49,918 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/A/b073e18a3e444648a529bf291528129f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/b073e18a3e444648a529bf291528129f 2024-12-10T14:26:49,921 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/b073e18a3e444648a529bf291528129f, entries=150, sequenceid=383, filesize=12.0 K 2024-12-10T14:26:49,921 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/B/e669de1cefc44e9a8119c7b9eb48726e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/e669de1cefc44e9a8119c7b9eb48726e 2024-12-10T14:26:49,924 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/e669de1cefc44e9a8119c7b9eb48726e, entries=150, sequenceid=383, filesize=12.0 K 2024-12-10T14:26:49,924 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/.tmp/C/6b280793b9d641d69edf07af0d3a2e2c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/6b280793b9d641d69edf07af0d3a2e2c 2024-12-10T14:26:49,927 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/6b280793b9d641d69edf07af0d3a2e2c, entries=150, sequenceid=383, filesize=12.0 K 2024-12-10T14:26:49,928 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 718b511ced67f3365dbb07f1afd9efaa in 1238ms, sequenceid=383, compaction requested=true 2024-12-10T14:26:49,928 DEBUG [StoreCloser-TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/c62ae1ce59704076b6808ae414f3bb0d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/982a5e17c5824628aac977c0f07432cc, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/42f2e03d9f6048499fa8653dcb609501, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/ff03f5f0899a42a7bd5516cd3d946ba9, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/6d0a87d3d5564dbc81b3fbc555c6ce6f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/d15f0a4ddb2843d79c4f0b4a0744fb73, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/6cb19355989a4a33befdb3568e1e621c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/1407704c313546e48b3888aa4aee04fc, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/e5b0cf0818fa4934a912d04c4ab16720, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/ba8e73c456f74c8088fd4b973733663e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/75176d7e277a4b4aac1db9afe3b72739, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/b682f715433a44cdb4594b6d9a83c750, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/1bcc9ca5d9ad408e979262e7233898a3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/234c69408d9a4407a83ed6b720707d9e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/5dfe1fd986f348e39b6e8bd231b54bfe, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/7c9eafefec8846c2b209dd39a29ee0be, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/46edc050932c48f2b50215426472573b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/b59cba189ed1491b87e8c811f813add0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/e16251b370484f189bfda0b8d1a1158e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/f207c717dfd84f3f85937ad2cdfb5ecb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/271a27d19f914208b8831d689e98098b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/550d1fe58a5a4dc39195e0e0bdf5088b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/3b0aa4c48a0846859450a94bc63357ba, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/32e22d0931f14fb5a96189e10a09d46e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/5491a21a9ae14d6e9b933952625e926e] to archive 2024-12-10T14:26:49,929 DEBUG [StoreCloser-TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
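Editor's note: on close, HStore hands the compacted-away store files to HFileArchiver, which moves them from .../data/default/TestAcidGuarantees/<region>/<family> to the mirrored .../archive/... path on the same HDFS, as the entries below show. A minimal sketch for listing what ended up in one archived family directory; the NameNode address and paths are the ones from this test run and are assumptions anywhere else:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchivedHFiles {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:38801"), conf);
            // Archive layout mirrors the data layout:
            // .../archive/data/<namespace>/<table>/<region>/<family>
            Path archivedFamily = new Path("/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/"
                + "archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A");
            for (FileStatus status : fs.listStatus(archivedFamily)) {
                System.out.println(status.getPath().getName() + " " + status.getLen() + " bytes");
            }
        }
    }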
2024-12-10T14:26:49,931 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/982a5e17c5824628aac977c0f07432cc to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/982a5e17c5824628aac977c0f07432cc 2024-12-10T14:26:49,931 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/c62ae1ce59704076b6808ae414f3bb0d to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/c62ae1ce59704076b6808ae414f3bb0d 2024-12-10T14:26:49,931 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/ff03f5f0899a42a7bd5516cd3d946ba9 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/ff03f5f0899a42a7bd5516cd3d946ba9 2024-12-10T14:26:49,931 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/42f2e03d9f6048499fa8653dcb609501 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/42f2e03d9f6048499fa8653dcb609501 2024-12-10T14:26:49,931 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/6d0a87d3d5564dbc81b3fbc555c6ce6f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/6d0a87d3d5564dbc81b3fbc555c6ce6f 2024-12-10T14:26:49,933 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/6cb19355989a4a33befdb3568e1e621c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/6cb19355989a4a33befdb3568e1e621c 2024-12-10T14:26:49,933 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/e5b0cf0818fa4934a912d04c4ab16720 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/e5b0cf0818fa4934a912d04c4ab16720 2024-12-10T14:26:49,933 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/1407704c313546e48b3888aa4aee04fc to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/1407704c313546e48b3888aa4aee04fc 2024-12-10T14:26:49,933 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/1bcc9ca5d9ad408e979262e7233898a3 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/1bcc9ca5d9ad408e979262e7233898a3 2024-12-10T14:26:49,933 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/ba8e73c456f74c8088fd4b973733663e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/ba8e73c456f74c8088fd4b973733663e 2024-12-10T14:26:49,933 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/d15f0a4ddb2843d79c4f0b4a0744fb73 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/d15f0a4ddb2843d79c4f0b4a0744fb73 2024-12-10T14:26:49,933 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/75176d7e277a4b4aac1db9afe3b72739 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/75176d7e277a4b4aac1db9afe3b72739 2024-12-10T14:26:49,933 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/b682f715433a44cdb4594b6d9a83c750 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/b682f715433a44cdb4594b6d9a83c750 2024-12-10T14:26:49,935 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/7c9eafefec8846c2b209dd39a29ee0be to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/7c9eafefec8846c2b209dd39a29ee0be 2024-12-10T14:26:49,935 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/234c69408d9a4407a83ed6b720707d9e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/234c69408d9a4407a83ed6b720707d9e 2024-12-10T14:26:49,935 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/b59cba189ed1491b87e8c811f813add0 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/b59cba189ed1491b87e8c811f813add0 2024-12-10T14:26:49,935 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/5dfe1fd986f348e39b6e8bd231b54bfe to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/5dfe1fd986f348e39b6e8bd231b54bfe 2024-12-10T14:26:49,935 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/e16251b370484f189bfda0b8d1a1158e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/e16251b370484f189bfda0b8d1a1158e 2024-12-10T14:26:49,935 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/f207c717dfd84f3f85937ad2cdfb5ecb to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/f207c717dfd84f3f85937ad2cdfb5ecb 2024-12-10T14:26:49,936 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/46edc050932c48f2b50215426472573b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/46edc050932c48f2b50215426472573b 2024-12-10T14:26:49,936 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/271a27d19f914208b8831d689e98098b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/271a27d19f914208b8831d689e98098b 2024-12-10T14:26:49,936 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/3b0aa4c48a0846859450a94bc63357ba to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/3b0aa4c48a0846859450a94bc63357ba 2024-12-10T14:26:49,937 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/550d1fe58a5a4dc39195e0e0bdf5088b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/550d1fe58a5a4dc39195e0e0bdf5088b 2024-12-10T14:26:49,937 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/32e22d0931f14fb5a96189e10a09d46e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/32e22d0931f14fb5a96189e10a09d46e 2024-12-10T14:26:49,937 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/5491a21a9ae14d6e9b933952625e926e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/5491a21a9ae14d6e9b933952625e926e 2024-12-10T14:26:49,938 DEBUG [StoreCloser-TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7bad3117ece74a5a9fa57a3660620925, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/5022554ece15460ca83e4f489bef3c80, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/ba5b4ff4ec334caba92fd8d03fd4d0d3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c6814bb8e4b04fcd975f2fc0fce0bd7e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/fc508aacfe6340b4b5a6573255723746, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/3ded05b0b2d348f19790d6e8903f44fe, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/0185750d33ff4a6b96007d21c955851e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c0e705af4c9a434fb4bbd2a11540cc55, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7d79937d2dbd447c9318008c2f7fc36a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/87583b8132f8491090c5e47037ad0cb9, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/1f30ee75f7a04136924b3e9e0dc514a8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c766e4f50f7c4a4c97392984ce8e49cf, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/db78053980bf4d76b48a71e8335e7450, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/a55bf3b7571e4c5e9915cb308779e68e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/8eba74751fbd4895bc4fe5d6efac4efe, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7d40915d75c94e21aec556939d1f6bf4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/771e4e7c07d04e549f280f97e4854341, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/37ae71f05f3a4c5dbc76e7108152335b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/520015d93d6d4d02954c5422e21427c0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/71289fdb02eb429da57d394ced202fab, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/323484acf71b48069c0a2a82e50a641c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/016dac6040234113bccaca86d50b975d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/b1ba5763fb114574a7079a4ee8dcfcbf, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/27212b1c28b0456284ccb5cf963639aa, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/06b397ce16ff42d19715bb77066e6bad] to archive 2024-12-10T14:26:49,939 DEBUG [StoreCloser-TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T14:26:49,941 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7bad3117ece74a5a9fa57a3660620925 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7bad3117ece74a5a9fa57a3660620925 2024-12-10T14:26:49,941 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/5022554ece15460ca83e4f489bef3c80 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/5022554ece15460ca83e4f489bef3c80 2024-12-10T14:26:49,941 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/fc508aacfe6340b4b5a6573255723746 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/fc508aacfe6340b4b5a6573255723746 2024-12-10T14:26:49,941 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c6814bb8e4b04fcd975f2fc0fce0bd7e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c6814bb8e4b04fcd975f2fc0fce0bd7e 2024-12-10T14:26:49,941 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/0185750d33ff4a6b96007d21c955851e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/0185750d33ff4a6b96007d21c955851e 2024-12-10T14:26:49,942 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/3ded05b0b2d348f19790d6e8903f44fe to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/3ded05b0b2d348f19790d6e8903f44fe 2024-12-10T14:26:49,942 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c0e705af4c9a434fb4bbd2a11540cc55 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c0e705af4c9a434fb4bbd2a11540cc55 2024-12-10T14:26:49,943 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7d79937d2dbd447c9318008c2f7fc36a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7d79937d2dbd447c9318008c2f7fc36a 2024-12-10T14:26:49,943 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/87583b8132f8491090c5e47037ad0cb9 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/87583b8132f8491090c5e47037ad0cb9 2024-12-10T14:26:49,943 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/1f30ee75f7a04136924b3e9e0dc514a8 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/1f30ee75f7a04136924b3e9e0dc514a8 2024-12-10T14:26:49,943 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c766e4f50f7c4a4c97392984ce8e49cf to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/c766e4f50f7c4a4c97392984ce8e49cf 2024-12-10T14:26:49,944 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/db78053980bf4d76b48a71e8335e7450 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/db78053980bf4d76b48a71e8335e7450 2024-12-10T14:26:49,944 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/ba5b4ff4ec334caba92fd8d03fd4d0d3 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/ba5b4ff4ec334caba92fd8d03fd4d0d3 2024-12-10T14:26:49,945 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/8eba74751fbd4895bc4fe5d6efac4efe to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/8eba74751fbd4895bc4fe5d6efac4efe 2024-12-10T14:26:49,945 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/a55bf3b7571e4c5e9915cb308779e68e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/a55bf3b7571e4c5e9915cb308779e68e 2024-12-10T14:26:49,946 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/771e4e7c07d04e549f280f97e4854341 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/771e4e7c07d04e549f280f97e4854341 2024-12-10T14:26:49,946 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7d40915d75c94e21aec556939d1f6bf4 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7d40915d75c94e21aec556939d1f6bf4 2024-12-10T14:26:49,946 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/520015d93d6d4d02954c5422e21427c0 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/520015d93d6d4d02954c5422e21427c0 2024-12-10T14:26:49,946 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/37ae71f05f3a4c5dbc76e7108152335b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/37ae71f05f3a4c5dbc76e7108152335b 2024-12-10T14:26:49,947 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/323484acf71b48069c0a2a82e50a641c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/323484acf71b48069c0a2a82e50a641c 2024-12-10T14:26:49,947 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/71289fdb02eb429da57d394ced202fab to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/71289fdb02eb429da57d394ced202fab 2024-12-10T14:26:49,947 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/b1ba5763fb114574a7079a4ee8dcfcbf to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/b1ba5763fb114574a7079a4ee8dcfcbf 2024-12-10T14:26:49,947 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/27212b1c28b0456284ccb5cf963639aa to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/27212b1c28b0456284ccb5cf963639aa 2024-12-10T14:26:49,947 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/016dac6040234113bccaca86d50b975d to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/016dac6040234113bccaca86d50b975d 2024-12-10T14:26:49,947 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/06b397ce16ff42d19715bb77066e6bad to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/06b397ce16ff42d19715bb77066e6bad 2024-12-10T14:26:49,948 DEBUG [StoreCloser-TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/48a36015801a448e86b56e63a1dff9f8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/5251b2a2b6ba4eecb615d0c1db7856db, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/40d2944a5d514ce7b6754187c27f8976, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/d4e8a76f6c2e4a36b0da3a82330a74ac, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/debb634fa5a14da08b8099cf70c76cd3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/4288ef74f57e4dfdbc69935c4ad374d5, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/1d084e97d224439895ed17815621a3f1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/b1bc690e22324df1ba9415a8df775a45, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/d334f1d26c1a4016a4c38932bf0cef17, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/b23cf149484e47e3af1fc78c9174e686, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/7470ddb473b34e8f885382be87d14307, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/c642479d73224934b5156c05480845f4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/4277cdec89414c3bac0d274b8159d593, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/734790bc1b524994ac2d437f75d77832, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/9d1a802849b3438fb7a44f6576ecb208, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/583137b91005417c9667c7f40c3194d5, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/66640d3173f142b59d89e6adcb6cba2b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/1ec81a0774b540dda6fac501991668c7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/747795a556904f149e4c462961359e14, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/eaf2dc5b3f114efd9ac802242ffb6f74, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/e392d8beb38a4f93bd6468fce5958f3d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/91ee1f448cc94895afc1820ef80c661a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/7baa0f69801c45d9b2d4b9b4dc6edb73, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/f66a8555521f48279380286675e8321e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/e8a6a67680e141f498e081ef62d3422e] to archive 2024-12-10T14:26:49,949 DEBUG [StoreCloser-TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T14:26:49,951 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/48a36015801a448e86b56e63a1dff9f8 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/48a36015801a448e86b56e63a1dff9f8 2024-12-10T14:26:49,951 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/40d2944a5d514ce7b6754187c27f8976 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/40d2944a5d514ce7b6754187c27f8976 2024-12-10T14:26:49,952 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/1d084e97d224439895ed17815621a3f1 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/1d084e97d224439895ed17815621a3f1 2024-12-10T14:26:49,952 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/b1bc690e22324df1ba9415a8df775a45 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/b1bc690e22324df1ba9415a8df775a45 2024-12-10T14:26:49,952 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/4288ef74f57e4dfdbc69935c4ad374d5 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/4288ef74f57e4dfdbc69935c4ad374d5 2024-12-10T14:26:49,952 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/debb634fa5a14da08b8099cf70c76cd3 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/debb634fa5a14da08b8099cf70c76cd3 2024-12-10T14:26:49,952 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/d4e8a76f6c2e4a36b0da3a82330a74ac to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/d4e8a76f6c2e4a36b0da3a82330a74ac 2024-12-10T14:26:49,953 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/5251b2a2b6ba4eecb615d0c1db7856db to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/5251b2a2b6ba4eecb615d0c1db7856db 2024-12-10T14:26:49,953 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/d334f1d26c1a4016a4c38932bf0cef17 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/d334f1d26c1a4016a4c38932bf0cef17 2024-12-10T14:26:49,954 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/c642479d73224934b5156c05480845f4 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/c642479d73224934b5156c05480845f4 2024-12-10T14:26:49,954 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/7470ddb473b34e8f885382be87d14307 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/7470ddb473b34e8f885382be87d14307 2024-12-10T14:26:49,954 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/734790bc1b524994ac2d437f75d77832 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/734790bc1b524994ac2d437f75d77832 2024-12-10T14:26:49,954 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/66640d3173f142b59d89e6adcb6cba2b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/66640d3173f142b59d89e6adcb6cba2b 2024-12-10T14:26:49,954 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/b23cf149484e47e3af1fc78c9174e686 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/b23cf149484e47e3af1fc78c9174e686 2024-12-10T14:26:49,955 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/583137b91005417c9667c7f40c3194d5 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/583137b91005417c9667c7f40c3194d5 2024-12-10T14:26:49,955 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/9d1a802849b3438fb7a44f6576ecb208 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/9d1a802849b3438fb7a44f6576ecb208 2024-12-10T14:26:49,955 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/4277cdec89414c3bac0d274b8159d593 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/4277cdec89414c3bac0d274b8159d593 2024-12-10T14:26:49,956 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/1ec81a0774b540dda6fac501991668c7 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/1ec81a0774b540dda6fac501991668c7 2024-12-10T14:26:49,956 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/747795a556904f149e4c462961359e14 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/747795a556904f149e4c462961359e14 2024-12-10T14:26:49,956 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/eaf2dc5b3f114efd9ac802242ffb6f74 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/eaf2dc5b3f114efd9ac802242ffb6f74 2024-12-10T14:26:49,956 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/e392d8beb38a4f93bd6468fce5958f3d to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/e392d8beb38a4f93bd6468fce5958f3d 2024-12-10T14:26:49,957 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/91ee1f448cc94895afc1820ef80c661a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/91ee1f448cc94895afc1820ef80c661a 2024-12-10T14:26:49,957 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/e8a6a67680e141f498e081ef62d3422e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/e8a6a67680e141f498e081ef62d3422e 2024-12-10T14:26:49,957 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/7baa0f69801c45d9b2d4b9b4dc6edb73 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/7baa0f69801c45d9b2d4b9b4dc6edb73 2024-12-10T14:26:49,957 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/f66a8555521f48279380286675e8321e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/f66a8555521f48279380286675e8321e 2024-12-10T14:26:49,961 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/recovered.edits/386.seqid, newMaxSeqId=386, maxSeqId=1 2024-12-10T14:26:49,962 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa. 
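Note on the archive moves recorded above: each compacted store file is relocated from the region's data directory to the parallel location under archive/, preserving the namespace/table/region/family layout. The Java sketch below only mirrors that path mapping as it appears in this log; it is not the org.apache.hadoop.hbase.backup.HFileArchiver implementation, and EXAMPLE-ROOT, REGION and STOREFILE are placeholder names.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative only: reproduces the data/ -> archive/data/ mapping seen in the log,
    // not HBase's internal archiver code.
    public class ArchivePathSketch {

      // storeFile is expected at rootDir/data/<namespace>/<table>/<region>/<family>/<file>
      static Path archiveLocation(Path rootDir, Path storeFile) {
        Path family = storeFile.getParent();
        Path region = family.getParent();
        Path table  = region.getParent();
        Path ns     = table.getParent();
        // the archive keeps the same namespace/table/region/family/file layout
        Path archived = new Path(rootDir, "archive/data");
        archived = new Path(archived, ns.getName());
        archived = new Path(archived, table.getName());
        archived = new Path(archived, region.getName());
        archived = new Path(archived, family.getName());
        return new Path(archived, storeFile.getName());
      }

      public static void main(String[] args) throws Exception {
        Path root = new Path("hdfs://localhost:38801/user/jenkins/test-data/EXAMPLE-ROOT");
        Path storeFile = new Path(root, "data/default/TestAcidGuarantees/REGION/A/STOREFILE");
        FileSystem fs = storeFile.getFileSystem(new Configuration());
        Path target = archiveLocation(root, storeFile);
        fs.mkdirs(target.getParent());   // ensure the archive directory exists
        fs.rename(storeFile, target);    // the move logged as "Archived from ..." above
      }
    }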
2024-12-10T14:26:49,962 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1635): Region close journal for 718b511ced67f3365dbb07f1afd9efaa: 2024-12-10T14:26:49,963 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(170): Closed 718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:49,964 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=718b511ced67f3365dbb07f1afd9efaa, regionState=CLOSED 2024-12-10T14:26:49,965 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-10T14:26:49,966 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseRegionProcedure 718b511ced67f3365dbb07f1afd9efaa, server=db1d50717577,46699,1733840717757 in 1.4270 sec 2024-12-10T14:26:49,966 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=90 2024-12-10T14:26:49,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=90, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=718b511ced67f3365dbb07f1afd9efaa, UNASSIGN in 1.4290 sec 2024-12-10T14:26:49,968 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-10T14:26:49,968 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4320 sec 2024-12-10T14:26:49,969 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840809969"}]},"ts":"1733840809969"} 2024-12-10T14:26:49,970 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T14:26:49,973 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-10T14:26:49,974 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4450 sec 2024-12-10T14:26:50,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-10T14:26:50,635 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-12-10T14:26:50,635 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T14:26:50,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:26:50,636 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:26:50,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-10T14:26:50,637 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=93, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:26:50,638 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:50,640 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/recovered.edits] 2024-12-10T14:26:50,643 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/d297c9054fa6425d923c7e8b214db800 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/d297c9054fa6425d923c7e8b214db800 2024-12-10T14:26:50,643 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/b073e18a3e444648a529bf291528129f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/b073e18a3e444648a529bf291528129f 2024-12-10T14:26:50,643 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/385141e6224342bfb61d0e354aa36538 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/A/385141e6224342bfb61d0e354aa36538 2024-12-10T14:26:50,645 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7c97bb7ae82648c09a6ad68cde43e629 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/7c97bb7ae82648c09a6ad68cde43e629 2024-12-10T14:26:50,645 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/e669de1cefc44e9a8119c7b9eb48726e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/e669de1cefc44e9a8119c7b9eb48726e 
2024-12-10T14:26:50,645 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/9795965d516a447197e39265e17d1d84 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/B/9795965d516a447197e39265e17d1d84 2024-12-10T14:26:50,648 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/87aab811e1914092a274049c32f66604 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/87aab811e1914092a274049c32f66604 2024-12-10T14:26:50,648 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/6b280793b9d641d69edf07af0d3a2e2c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/6b280793b9d641d69edf07af0d3a2e2c 2024-12-10T14:26:50,648 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/6cd8e944d0bf4ddf8f40781f3bdf5d7f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/C/6cd8e944d0bf4ddf8f40781f3bdf5d7f 2024-12-10T14:26:50,651 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/recovered.edits/386.seqid to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa/recovered.edits/386.seqid 2024-12-10T14:26:50,651 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/718b511ced67f3365dbb07f1afd9efaa 2024-12-10T14:26:50,651 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T14:26:50,653 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=93, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:26:50,657 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T14:26:50,658 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
2024-12-10T14:26:50,659 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=93, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:26:50,659 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-10T14:26:50,660 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733840810659"}]},"ts":"9223372036854775807"} 2024-12-10T14:26:50,661 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T14:26:50,661 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 718b511ced67f3365dbb07f1afd9efaa, NAME => 'TestAcidGuarantees,,1733840778061.718b511ced67f3365dbb07f1afd9efaa.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T14:26:50,661 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-10T14:26:50,661 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733840810661"}]},"ts":"9223372036854775807"} 2024-12-10T14:26:50,662 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T14:26:50,664 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=93, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:26:50,665 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 29 msec 2024-12-10T14:26:50,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-10T14:26:50,738 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-12-10T14:26:50,747 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=244 (was 248), OpenFileDescriptor=449 (was 462), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=242 (was 261), ProcessCount=11 (was 11), AvailableMemoryMB=2344 (was 2387) 2024-12-10T14:26:50,755 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=244, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=242, ProcessCount=11, AvailableMemoryMB=2344 2024-12-10T14:26:50,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
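For reference, the server-side procedures just logged (DisableTableProcedure pid=89 followed by DeleteTableProcedure pid=93) are driven by two client calls. A minimal sketch, assuming a standard HBase 2.x client on the classpath:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Client-side view of the disable + delete sequence whose master-side
    // procedures appear in the log above.
    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(tn)) {
            admin.disableTable(tn);   // master runs DisableTableProcedure (pid=89 above)
            admin.deleteTable(tn);    // master runs DeleteTableProcedure (pid=93 above)
          }
        }
      }
    }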
2024-12-10T14:26:50,757 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:26:50,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T14:26:50,758 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:26:50,758 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:50,758 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 94 2024-12-10T14:26:50,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-10T14:26:50,759 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:26:50,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742165_1341 (size=963) 2024-12-10T14:26:50,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-10T14:26:51,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-10T14:26:51,166 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da 2024-12-10T14:26:51,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742166_1342 (size=53) 2024-12-10T14:26:51,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-10T14:26:51,571 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:26:51,571 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing ff0e5baacca9aaf73756a7fad4bfbee2, disabling compactions & flushes 2024-12-10T14:26:51,571 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:51,571 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:51,571 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. after waiting 0 ms 2024-12-10T14:26:51,571 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:51,571 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
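The CREATE request above carries the table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' plus three families A, B and C with VERSIONS => '1'. A hedged sketch of building an equivalent descriptor with the client API follows; the explicit 128 MB flush size is an assumed value, included only to show where MEMSTORE_FLUSHSIZE would be raised to avoid the TableDescriptorChecker WARN seen earlier, and is not taken from the test itself.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch of a descriptor equivalent to the one logged above: ADAPTIVE in-memory
    // compaction set as table-level metadata, three families with max 1 version.
    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder tdb = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
              .setMemStoreFlushSize(128L * 1024 * 1024);   // assumed value, see note above
          for (String family : new String[] {"A", "B", "C"}) {
            ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)
                .build();
            tdb.setColumnFamily(cfd);
          }
          TableDescriptor td = tdb.build();
          admin.createTable(td);   // -> CreateTableProcedure (pid=94 in the log above)
        }
      }
    }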
2024-12-10T14:26:51,571 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:26:51,572 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:26:51,573 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733840811572"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733840811572"}]},"ts":"1733840811572"} 2024-12-10T14:26:51,573 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T14:26:51,574 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:26:51,574 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840811574"}]},"ts":"1733840811574"} 2024-12-10T14:26:51,575 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T14:26:51,578 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ff0e5baacca9aaf73756a7fad4bfbee2, ASSIGN}] 2024-12-10T14:26:51,579 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ff0e5baacca9aaf73756a7fad4bfbee2, ASSIGN 2024-12-10T14:26:51,579 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=ff0e5baacca9aaf73756a7fad4bfbee2, ASSIGN; state=OFFLINE, location=db1d50717577,46699,1733840717757; forceNewPlan=false, retain=false 2024-12-10T14:26:51,730 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=ff0e5baacca9aaf73756a7fad4bfbee2, regionState=OPENING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:26:51,731 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; OpenRegionProcedure ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:26:51,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-10T14:26:51,882 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:51,885 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:26:51,885 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7285): Opening region: {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:26:51,885 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:51,885 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:26:51,885 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7327): checking encryption for ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:51,885 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7330): checking classloading for ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:51,886 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:51,887 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:26:51,887 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ff0e5baacca9aaf73756a7fad4bfbee2 columnFamilyName A 2024-12-10T14:26:51,887 DEBUG [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:51,888 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.HStore(327): Store=ff0e5baacca9aaf73756a7fad4bfbee2/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:26:51,888 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:51,889 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:26:51,889 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ff0e5baacca9aaf73756a7fad4bfbee2 columnFamilyName B 2024-12-10T14:26:51,889 DEBUG [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:51,890 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.HStore(327): Store=ff0e5baacca9aaf73756a7fad4bfbee2/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:26:51,890 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:51,891 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:26:51,891 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ff0e5baacca9aaf73756a7fad4bfbee2 columnFamilyName C 2024-12-10T14:26:51,891 DEBUG [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:51,891 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.HStore(327): Store=ff0e5baacca9aaf73756a7fad4bfbee2/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:26:51,891 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:51,892 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:51,892 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:51,893 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T14:26:51,894 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1085): writing seq id for ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:51,896 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:26:51,896 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1102): Opened ff0e5baacca9aaf73756a7fad4bfbee2; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74333789, jitterRate=0.10765977203845978}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T14:26:51,897 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1001): Region open journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:26:51,897 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2., pid=96, masterSystemTime=1733840811882 2024-12-10T14:26:51,898 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:51,898 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
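Note: the store-open records above show the TestAcidGuarantees region carrying three column families (A, B, C), each backed by a CompactingMemStore using the ADAPTIVE in-memory compaction policy with a 2.00 MB in-memory flush threshold and a single version per cell. For orientation only, a table of that shape could be declared through the stock HBase 2.x client API roughly as sketched below; this is an illustrative sketch with invented class and variable names, not the test's own setup code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // One family per column family seen in the log (A, B, C), each with VERSIONS=1
      // and the ADAPTIVE in-memory compaction policy reported by CompactingMemStore(122).
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build());
      }
      admin.createTable(table.build());
    }
  }
}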
2024-12-10T14:26:51,899 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=ff0e5baacca9aaf73756a7fad4bfbee2, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:26:51,900 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-10T14:26:51,900 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; OpenRegionProcedure ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 in 168 msec 2024-12-10T14:26:51,902 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-12-10T14:26:51,902 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ff0e5baacca9aaf73756a7fad4bfbee2, ASSIGN in 322 msec 2024-12-10T14:26:51,902 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:26:51,902 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840811902"}]},"ts":"1733840811902"} 2024-12-10T14:26:51,903 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T14:26:51,906 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:26:51,907 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1490 sec 2024-12-10T14:26:52,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-10T14:26:52,863 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 94 completed 2024-12-10T14:26:52,864 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ec15031 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2df33cdf 2024-12-10T14:26:52,867 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@117e86d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:52,868 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:52,869 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58198, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:52,870 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T14:26:52,871 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52386, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T14:26:52,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-10T14:26:52,873 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:26:52,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-10T14:26:52,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742167_1343 (size=999) 2024-12-10T14:26:53,282 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-10T14:26:53,283 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-10T14:26:53,284 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T14:26:53,286 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ff0e5baacca9aaf73756a7fad4bfbee2, REOPEN/MOVE}] 2024-12-10T14:26:53,286 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ff0e5baacca9aaf73756a7fad4bfbee2, REOPEN/MOVE 2024-12-10T14:26:53,287 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=ff0e5baacca9aaf73756a7fad4bfbee2, regionState=CLOSING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:26:53,288 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:26:53,288 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; CloseRegionProcedure ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:26:53,439 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:53,440 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(124): Close ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:53,440 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:26:53,440 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1681): Closing ff0e5baacca9aaf73756a7fad4bfbee2, disabling compactions & flushes 2024-12-10T14:26:53,440 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:53,440 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:53,440 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. after waiting 0 ms 2024-12-10T14:26:53,440 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
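Note: the ModifyTableProcedure stored above (pid=97) rewrites column family A as a MOB-enabled family (IS_MOB => 'true', MOB_THRESHOLD => '4'), which is why the region is now being closed and reopened by the ReopenTableRegionsProcedure chain (pid=98 through 101). A client-side request with the same shape could be issued against the standard HBase 2.x Admin API roughly as below; this is a hedged sketch with an invented class name, not the code the test actually ran.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      TableDescriptor current = admin.getDescriptor(table);
      // Rebuild family 'A' as a MOB family; cells larger than 4 bytes are written as MOB.
      ColumnFamilyDescriptor a = ColumnFamilyDescriptorBuilder
          .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
          .setMobEnabled(true)
          .setMobThreshold(4L)
          .build();
      TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(a)
          .build();
      // Drives a ModifyTableProcedure on the master, which reopens the table's regions.
      admin.modifyTable(modified);
    }
  }
}

The synchronous admin.modifyTable() call waits on the master's procedure, which is consistent with the client polling "Checking to see if procedure is done pid=97" seen further down before continuing.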
2024-12-10T14:26:53,443 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-10T14:26:53,443 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:53,444 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1635): Region close journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:26:53,444 WARN [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegionServer(3786): Not adding moved region record: ff0e5baacca9aaf73756a7fad4bfbee2 to self. 2024-12-10T14:26:53,445 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(170): Closed ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:53,445 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=ff0e5baacca9aaf73756a7fad4bfbee2, regionState=CLOSED 2024-12-10T14:26:53,447 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-12-10T14:26:53,447 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; CloseRegionProcedure ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 in 158 msec 2024-12-10T14:26:53,447 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=ff0e5baacca9aaf73756a7fad4bfbee2, REOPEN/MOVE; state=CLOSED, location=db1d50717577,46699,1733840717757; forceNewPlan=false, retain=true 2024-12-10T14:26:53,598 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=ff0e5baacca9aaf73756a7fad4bfbee2, regionState=OPENING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:26:53,599 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=99, state=RUNNABLE; OpenRegionProcedure ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:26:53,750 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:53,752 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:26:53,753 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7285): Opening region: {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:26:53,753 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:53,753 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:26:53,753 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7327): checking encryption for ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:53,753 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7330): checking classloading for ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:53,754 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:53,755 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:26:53,755 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ff0e5baacca9aaf73756a7fad4bfbee2 columnFamilyName A 2024-12-10T14:26:53,756 DEBUG [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:53,757 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.HStore(327): Store=ff0e5baacca9aaf73756a7fad4bfbee2/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:26:53,757 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:53,757 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:26:53,757 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ff0e5baacca9aaf73756a7fad4bfbee2 columnFamilyName B 2024-12-10T14:26:53,758 DEBUG [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:53,758 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.HStore(327): Store=ff0e5baacca9aaf73756a7fad4bfbee2/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:26:53,758 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:53,758 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:26:53,758 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ff0e5baacca9aaf73756a7fad4bfbee2 columnFamilyName C 2024-12-10T14:26:53,758 DEBUG [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:53,759 INFO [StoreOpener-ff0e5baacca9aaf73756a7fad4bfbee2-1 {}] regionserver.HStore(327): Store=ff0e5baacca9aaf73756a7fad4bfbee2/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:26:53,759 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:53,759 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:53,760 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:53,761 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T14:26:53,762 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1085): writing seq id for ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:53,763 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1102): Opened ff0e5baacca9aaf73756a7fad4bfbee2; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67568280, jitterRate=0.006845831871032715}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T14:26:53,763 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1001): Region open journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:26:53,764 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2., pid=101, masterSystemTime=1733840813750 2024-12-10T14:26:53,765 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:53,765 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
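Note: with the region back open at openSeqNum=5, the client next asks the master to flush the whole table (FlushTableProcedure pid=102 below) while its writer threads keep issuing Puts. The "Over memstore limit=512.0 K" RegionTooBusyException warnings that follow are expected in this run: the table descriptor sets MEMSTORE_FLUSHSIZE to 131072 bytes (flagged as very small by the TableDescriptorChecker warning above), and 131072 x 4 = 524288 bytes = 512 KB, assuming the default hbase.hregion.memstore.block.multiplier of 4 is in effect. Requesting the same table flush from a client is a single Admin call; the sketch below is illustrative only and uses an invented class name.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to run a flush of every region of the table; the master fans this
      // out as FlushRegionProcedure subprocedures to the region servers (pid=102/103 below).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}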
2024-12-10T14:26:53,765 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=ff0e5baacca9aaf73756a7fad4bfbee2, regionState=OPEN, openSeqNum=5, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:26:53,767 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=99 2024-12-10T14:26:53,767 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; OpenRegionProcedure ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 in 168 msec 2024-12-10T14:26:53,768 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-12-10T14:26:53,768 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ff0e5baacca9aaf73756a7fad4bfbee2, REOPEN/MOVE in 481 msec 2024-12-10T14:26:53,770 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-12-10T14:26:53,770 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 485 msec 2024-12-10T14:26:53,771 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 897 msec 2024-12-10T14:26:53,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-10T14:26:53,773 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3dd5b441 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9f472e0 2024-12-10T14:26:53,778 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd96549, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:53,778 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c336ea4 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@167a78b0 2024-12-10T14:26:53,782 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31aea41b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:53,783 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f94d721 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5aee939b 2024-12-10T14:26:53,785 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e247aa1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:53,786 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x319559be to 
127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f49665c 2024-12-10T14:26:53,788 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2205f666, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:53,789 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c907e21 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683f8469 2024-12-10T14:26:53,791 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6584e9ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:53,792 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75e4d3d0 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37ec8e3b 2024-12-10T14:26:53,795 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@798e7fd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:53,795 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b308f62 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@787e5169 2024-12-10T14:26:53,798 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7284f16d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:53,798 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68035c67 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@627cad17 2024-12-10T14:26:53,801 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37a637ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:53,801 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3eab689a to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39387e4d 2024-12-10T14:26:53,804 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fa53591, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:53,804 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x59bd764a to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@238db126 2024-12-10T14:26:53,808 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3512017b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:26:53,811 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:26:53,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-12-10T14:26:53,812 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:26:53,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T14:26:53,813 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:26:53,813 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:26:53,813 DEBUG [hconnection-0x6483d2d7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:53,814 DEBUG [hconnection-0x10d97d76-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:53,814 DEBUG [hconnection-0x3af42210-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:53,815 DEBUG [hconnection-0x5146b7b8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:53,815 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58200, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:53,815 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58214, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:53,815 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58220, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:53,815 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58234, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:53,817 DEBUG [hconnection-0x1a551c66-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using 
SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:53,817 DEBUG [hconnection-0x78d03b98-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:53,818 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58238, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:53,818 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58248, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:53,819 DEBUG [hconnection-0x467e061f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:53,820 DEBUG [hconnection-0x494ad1ee-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:53,821 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58250, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:53,821 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58266, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:53,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:53,821 DEBUG [hconnection-0x4dad55fe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:53,821 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ff0e5baacca9aaf73756a7fad4bfbee2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T14:26:53,821 DEBUG [hconnection-0x385d6ccc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:26:53,822 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58270, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:53,822 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58272, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:26:53,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=A 2024-12-10T14:26:53,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:53,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=B 2024-12-10T14:26:53,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:53,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=C 2024-12-10T14:26:53,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:53,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:53,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840873835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:53,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:53,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840873837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:53,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:53,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840873840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:53,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:53,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840873841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:53,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:53,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840873841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:53,851 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108e01d9f9b71a40fa922997da2ca4679d_ff0e5baacca9aaf73756a7fad4bfbee2 is 50, key is test_row_0/A:col10/1733840813820/Put/seqid=0 2024-12-10T14:26:53,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742168_1344 (size=12154) 2024-12-10T14:26:53,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T14:26:53,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:53,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840873941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:53,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:53,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840873943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:53,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:53,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840873944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:53,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:53,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840873945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:53,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:53,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840873945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:53,965 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:53,966 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T14:26:53,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:53,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:26:53,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:53,966 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:53,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:53,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T14:26:54,118 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:54,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T14:26:54,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:54,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:26:54,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:54,118 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:54,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840874147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:54,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:54,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840874147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:54,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:54,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840874148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:54,151 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:54,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840874148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:54,151 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:54,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840874149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:54,258 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:54,262 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108e01d9f9b71a40fa922997da2ca4679d_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108e01d9f9b71a40fa922997da2ca4679d_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:54,262 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/99ae69adccfc4aa6a9e83f06098dfadd, store: [table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:26:54,263 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/99ae69adccfc4aa6a9e83f06098dfadd is 175, key is test_row_0/A:col10/1733840813820/Put/seqid=0 2024-12-10T14:26:54,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742169_1345 (size=30955) 2024-12-10T14:26:54,270 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
db1d50717577,46699,1733840717757 2024-12-10T14:26:54,271 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T14:26:54,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:54,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:26:54,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:54,271 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:54,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T14:26:54,423 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:54,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T14:26:54,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:26:54,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:26:54,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:54,423 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:26:54,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:54,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840874450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:54,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:54,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840874451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:54,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:54,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840874452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:54,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:54,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:54,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840874452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:54,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840874452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:54,575 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:54,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T14:26:54,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:54,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:26:54,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:54,576 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,667 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/99ae69adccfc4aa6a9e83f06098dfadd 2024-12-10T14:26:54,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/b020a0daca464b8dae97a63001173745 is 50, key is test_row_0/B:col10/1733840813820/Put/seqid=0 2024-12-10T14:26:54,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742170_1346 (size=12001) 2024-12-10T14:26:54,728 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:54,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T14:26:54,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:54,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
as already flushing 2024-12-10T14:26:54,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:54,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,881 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:54,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T14:26:54,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:54,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:26:54,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:54,881 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:54,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T14:26:54,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:54,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:54,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840874955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:54,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840874955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:54,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:54,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840874956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:54,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:54,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840874957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:54,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:54,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840874959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:55,033 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:55,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T14:26:55,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:55,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:26:55,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:55,034 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:55,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:55,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:26:55,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/b020a0daca464b8dae97a63001173745 2024-12-10T14:26:55,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/f4566ea8e194443d8afaa5661cb1523f is 50, key is test_row_0/C:col10/1733840813820/Put/seqid=0 2024-12-10T14:26:55,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742171_1347 (size=12001) 2024-12-10T14:26:55,128 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/f4566ea8e194443d8afaa5661cb1523f 2024-12-10T14:26:55,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/99ae69adccfc4aa6a9e83f06098dfadd as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/99ae69adccfc4aa6a9e83f06098dfadd 2024-12-10T14:26:55,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/99ae69adccfc4aa6a9e83f06098dfadd, entries=150, sequenceid=16, filesize=30.2 K 2024-12-10T14:26:55,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/b020a0daca464b8dae97a63001173745 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/b020a0daca464b8dae97a63001173745 2024-12-10T14:26:55,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/b020a0daca464b8dae97a63001173745, entries=150, sequenceid=16, filesize=11.7 K 
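The commit messages above show the pattern the flush follows: each store file is first written under the region's .tmp directory and only afterwards moved into the column-family directory (A, B, C), so readers never observe a half-written HFile. A minimal illustrative sketch of that write-then-rename pattern using only the plain Hadoop FileSystem API is below; the paths and file name are hypothetical, not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenCommit {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            // 1) Write the new file under .tmp first, invisible to readers of the store.
            Path tmp = new Path("/hbase/data/default/ExampleTable/region/.tmp/B/newfile");
            try (FSDataOutputStream out = fs.create(tmp)) {
                out.writeBytes("cell data");
            }
            // 2) Publish it with a single rename into the column-family directory.
            Path dst = new Path("/hbase/data/default/ExampleTable/region/B/newfile");
            if (!fs.rename(tmp, dst)) {
                throw new java.io.IOException("commit failed for " + dst);
            }
        }
    }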
2024-12-10T14:26:55,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/f4566ea8e194443d8afaa5661cb1523f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/f4566ea8e194443d8afaa5661cb1523f 2024-12-10T14:26:55,145 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/f4566ea8e194443d8afaa5661cb1523f, entries=150, sequenceid=16, filesize=11.7 K 2024-12-10T14:26:55,146 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ff0e5baacca9aaf73756a7fad4bfbee2 in 1325ms, sequenceid=16, compaction requested=false 2024-12-10T14:26:55,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:26:55,186 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:55,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T14:26:55,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
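pid=102 is the table-level FlushTableProcedure and pid=103 its per-region subprocedure; the master keeps re-dispatching the FlushRegionCallable to the region server, and the repeated "Unable to complete flush ... as already flushing" rejections earlier in the log are what drive those retries until the region is free to flush. A table flush of this kind is normally requested through the public Admin API; a minimal sketch, assuming a reachable cluster and reusing the table name from this test:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table; the master then
                // runs a flush procedure per region, as seen with pid=102/pid=103 here.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }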
2024-12-10T14:26:55,187 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing ff0e5baacca9aaf73756a7fad4bfbee2 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T14:26:55,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=A 2024-12-10T14:26:55,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:55,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=B 2024-12-10T14:26:55,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:55,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=C 2024-12-10T14:26:55,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:55,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108592a459df1c4e4e8f3dceabbe315e61_ff0e5baacca9aaf73756a7fad4bfbee2 is 50, key is test_row_0/A:col10/1733840813834/Put/seqid=0 2024-12-10T14:26:55,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742172_1348 (size=12154) 2024-12-10T14:26:55,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:55,201 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108592a459df1c4e4e8f3dceabbe315e61_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108592a459df1c4e4e8f3dceabbe315e61_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:55,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/5beb3d89899b47438c9c3fafed78ae35, store: [table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:26:55,203 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/5beb3d89899b47438c9c3fafed78ae35 is 175, key is test_row_0/A:col10/1733840813834/Put/seqid=0 2024-12-10T14:26:55,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742173_1349 (size=30955) 2024-12-10T14:26:55,494 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T14:26:55,618 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/5beb3d89899b47438c9c3fafed78ae35 2024-12-10T14:26:55,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/a9586a8ed17c464fb6ad6889de946a70 is 50, key is test_row_0/B:col10/1733840813834/Put/seqid=0 2024-12-10T14:26:55,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742174_1350 (size=12001) 2024-12-10T14:26:55,644 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/a9586a8ed17c464fb6ad6889de946a70 2024-12-10T14:26:55,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/cfd908e470bd4d4eb6235be9e5d4ed55 is 50, key is test_row_0/C:col10/1733840813834/Put/seqid=0 2024-12-10T14:26:55,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742175_1351 (size=12001) 2024-12-10T14:26:55,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T14:26:55,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:55,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:26:55,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:55,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840875967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:55,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:55,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840875968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:55,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:55,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840875969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:55,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:55,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840875971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:55,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:55,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840875972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,058 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/cfd908e470bd4d4eb6235be9e5d4ed55 2024-12-10T14:26:56,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/5beb3d89899b47438c9c3fafed78ae35 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/5beb3d89899b47438c9c3fafed78ae35 2024-12-10T14:26:56,071 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/5beb3d89899b47438c9c3fafed78ae35, entries=150, sequenceid=40, filesize=30.2 K 2024-12-10T14:26:56,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/a9586a8ed17c464fb6ad6889de946a70 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/a9586a8ed17c464fb6ad6889de946a70 2024-12-10T14:26:56,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840876074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840876074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840876075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840876075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,078 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/a9586a8ed17c464fb6ad6889de946a70, entries=150, sequenceid=40, filesize=11.7 K 2024-12-10T14:26:56,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840876076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/cfd908e470bd4d4eb6235be9e5d4ed55 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/cfd908e470bd4d4eb6235be9e5d4ed55 2024-12-10T14:26:56,085 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/cfd908e470bd4d4eb6235be9e5d4ed55, entries=150, sequenceid=40, filesize=11.7 K 2024-12-10T14:26:56,086 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for ff0e5baacca9aaf73756a7fad4bfbee2 in 900ms, sequenceid=40, compaction requested=false 2024-12-10T14:26:56,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:26:56,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
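The recurring RegionTooBusyException ("Over memstore limit=512.0 K") means writes are being rejected because the region's memstore has grown past its blocking size (hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, which appears to be set very low for this test) faster than the flushes above can drain it. The stock HBase client retries such failures on its own, but a caller doing its own retry on top of the client might look roughly like the sketch below; the row, qualifier, value and backoff numbers are arbitrary, and depending on client retry settings the exception may arrive wrapped rather than directly.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);
                        break;                        // write accepted
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 10) throw e;   // give up after a bounded number of tries
                        Thread.sleep(100L * attempt); // back off while the flush drains the memstore
                    }
                }
            }
        }
    }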
2024-12-10T14:26:56,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-10T14:26:56,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-12-10T14:26:56,091 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-12-10T14:26:56,091 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2740 sec 2024-12-10T14:26:56,093 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 2.2810 sec 2024-12-10T14:26:56,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:56,281 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ff0e5baacca9aaf73756a7fad4bfbee2 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T14:26:56,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=A 2024-12-10T14:26:56,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:56,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=B 2024-12-10T14:26:56,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:56,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=C 2024-12-10T14:26:56,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:56,294 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412107582e020d79f441e85d7505d65c14d28_ff0e5baacca9aaf73756a7fad4bfbee2 is 50, key is test_row_0/A:col10/1733840815971/Put/seqid=0 2024-12-10T14:26:56,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742176_1352 (size=17034) 2024-12-10T14:26:56,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840876304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840876304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840876306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840876306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840876309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840876412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840876414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840876414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840876414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840876415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,625 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840876617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,625 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840876618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,625 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840876618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840876619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840876620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,700 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:56,703 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412107582e020d79f441e85d7505d65c14d28_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412107582e020d79f441e85d7505d65c14d28_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:56,704 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/f489bdfd1efb4e4fb0aa1c45b48c55cc, store: [table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:26:56,705 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/f489bdfd1efb4e4fb0aa1c45b48c55cc is 175, key is test_row_0/A:col10/1733840815971/Put/seqid=0 2024-12-10T14:26:56,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742177_1353 (size=48139) 2024-12-10T14:26:56,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840876927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840876927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840876927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840876928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:56,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:56,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840876928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:57,109 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/f489bdfd1efb4e4fb0aa1c45b48c55cc 2024-12-10T14:26:57,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/7bcd24468f0d4131b1d9bfa65e763b03 is 50, key is test_row_0/B:col10/1733840815971/Put/seqid=0 2024-12-10T14:26:57,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742178_1354 (size=12001) 2024-12-10T14:26:57,125 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/7bcd24468f0d4131b1d9bfa65e763b03 2024-12-10T14:26:57,131 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/79b0063ff70c438eb4b64bed1e9522fe is 50, key is test_row_0/C:col10/1733840815971/Put/seqid=0 2024-12-10T14:26:57,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742179_1355 (size=12001) 2024-12-10T14:26:57,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:57,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840877432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:57,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:57,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840877433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:57,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:57,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840877433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:57,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:57,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840877434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:57,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:57,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840877435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:57,538 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/79b0063ff70c438eb4b64bed1e9522fe 2024-12-10T14:26:57,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/f489bdfd1efb4e4fb0aa1c45b48c55cc as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/f489bdfd1efb4e4fb0aa1c45b48c55cc 2024-12-10T14:26:57,547 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/f489bdfd1efb4e4fb0aa1c45b48c55cc, entries=250, sequenceid=54, filesize=47.0 K 2024-12-10T14:26:57,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/7bcd24468f0d4131b1d9bfa65e763b03 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/7bcd24468f0d4131b1d9bfa65e763b03 2024-12-10T14:26:57,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/7bcd24468f0d4131b1d9bfa65e763b03, entries=150, sequenceid=54, filesize=11.7 K 2024-12-10T14:26:57,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/79b0063ff70c438eb4b64bed1e9522fe as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/79b0063ff70c438eb4b64bed1e9522fe 2024-12-10T14:26:57,556 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/79b0063ff70c438eb4b64bed1e9522fe, entries=150, sequenceid=54, filesize=11.7 K 2024-12-10T14:26:57,556 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for ff0e5baacca9aaf73756a7fad4bfbee2 in 1275ms, sequenceid=54, compaction requested=true 2024-12-10T14:26:57,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:26:57,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ff0e5baacca9aaf73756a7fad4bfbee2:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:26:57,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:57,556 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:57,557 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:57,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ff0e5baacca9aaf73756a7fad4bfbee2:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:26:57,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:57,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ff0e5baacca9aaf73756a7fad4bfbee2:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:26:57,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:57,558 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:57,558 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): ff0e5baacca9aaf73756a7fad4bfbee2/A is initiating minor 
compaction (all files) 2024-12-10T14:26:57,558 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ff0e5baacca9aaf73756a7fad4bfbee2/A in TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:57,558 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:57,558 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/99ae69adccfc4aa6a9e83f06098dfadd, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/5beb3d89899b47438c9c3fafed78ae35, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/f489bdfd1efb4e4fb0aa1c45b48c55cc] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp, totalSize=107.5 K 2024-12-10T14:26:57,558 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): ff0e5baacca9aaf73756a7fad4bfbee2/B is initiating minor compaction (all files) 2024-12-10T14:26:57,558 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:57,558 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ff0e5baacca9aaf73756a7fad4bfbee2/B in TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:57,558 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/99ae69adccfc4aa6a9e83f06098dfadd, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/5beb3d89899b47438c9c3fafed78ae35, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/f489bdfd1efb4e4fb0aa1c45b48c55cc] 2024-12-10T14:26:57,559 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/b020a0daca464b8dae97a63001173745, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/a9586a8ed17c464fb6ad6889de946a70, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/7bcd24468f0d4131b1d9bfa65e763b03] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp, totalSize=35.2 K 2024-12-10T14:26:57,559 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99ae69adccfc4aa6a9e83f06098dfadd, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733840813820 2024-12-10T14:26:57,559 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting b020a0daca464b8dae97a63001173745, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733840813820 2024-12-10T14:26:57,559 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting a9586a8ed17c464fb6ad6889de946a70, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733840813834 2024-12-10T14:26:57,559 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5beb3d89899b47438c9c3fafed78ae35, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733840813834 2024-12-10T14:26:57,560 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bcd24468f0d4131b1d9bfa65e763b03, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733840815970 2024-12-10T14:26:57,560 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting f489bdfd1efb4e4fb0aa1c45b48c55cc, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733840815968 2024-12-10T14:26:57,566 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:26:57,567 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ff0e5baacca9aaf73756a7fad4bfbee2#B#compaction#297 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:57,568 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/2a5625659ef04dcab997dcba33dbe910 is 50, key is test_row_0/B:col10/1733840815971/Put/seqid=0 2024-12-10T14:26:57,570 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121015572c17cdf24777864483fe9cd608bb_ff0e5baacca9aaf73756a7fad4bfbee2 store=[table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:26:57,572 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121015572c17cdf24777864483fe9cd608bb_ff0e5baacca9aaf73756a7fad4bfbee2, store=[table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:26:57,572 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121015572c17cdf24777864483fe9cd608bb_ff0e5baacca9aaf73756a7fad4bfbee2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:26:57,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742180_1356 (size=12104) 2024-12-10T14:26:57,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742181_1357 (size=4469) 2024-12-10T14:26:57,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T14:26:57,917 INFO [Thread-1560 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-12-10T14:26:57,918 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:26:57,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-12-10T14:26:57,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T14:26:57,919 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:26:57,920 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:26:57,920 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, 
ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:26:57,977 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/2a5625659ef04dcab997dcba33dbe910 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/2a5625659ef04dcab997dcba33dbe910 2024-12-10T14:26:57,981 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ff0e5baacca9aaf73756a7fad4bfbee2#A#compaction#298 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:57,981 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/6d287e678b414efa8897c395b1a26938 is 175, key is test_row_0/A:col10/1733840815971/Put/seqid=0 2024-12-10T14:26:57,985 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ff0e5baacca9aaf73756a7fad4bfbee2/B of ff0e5baacca9aaf73756a7fad4bfbee2 into 2a5625659ef04dcab997dcba33dbe910(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:26:57,985 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:26:57,985 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2., storeName=ff0e5baacca9aaf73756a7fad4bfbee2/B, priority=13, startTime=1733840817556; duration=0sec 2024-12-10T14:26:57,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742182_1358 (size=31058) 2024-12-10T14:26:57,985 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:26:57,985 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ff0e5baacca9aaf73756a7fad4bfbee2:B 2024-12-10T14:26:57,985 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:26:57,986 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:26:57,986 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): ff0e5baacca9aaf73756a7fad4bfbee2/C is initiating minor compaction (all files) 2024-12-10T14:26:57,986 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ff0e5baacca9aaf73756a7fad4bfbee2/C in 
TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:57,986 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/f4566ea8e194443d8afaa5661cb1523f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/cfd908e470bd4d4eb6235be9e5d4ed55, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/79b0063ff70c438eb4b64bed1e9522fe] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp, totalSize=35.2 K 2024-12-10T14:26:57,987 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting f4566ea8e194443d8afaa5661cb1523f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733840813820 2024-12-10T14:26:57,987 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting cfd908e470bd4d4eb6235be9e5d4ed55, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733840813834 2024-12-10T14:26:57,988 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 79b0063ff70c438eb4b64bed1e9522fe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733840815970 2024-12-10T14:26:57,994 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ff0e5baacca9aaf73756a7fad4bfbee2#C#compaction#299 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:26:57,994 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/84d548d6bd7e47a9ad1c165323396955 is 50, key is test_row_0/C:col10/1733840815971/Put/seqid=0 2024-12-10T14:26:57,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742183_1359 (size=12104) 2024-12-10T14:26:58,002 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/84d548d6bd7e47a9ad1c165323396955 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/84d548d6bd7e47a9ad1c165323396955 2024-12-10T14:26:58,016 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ff0e5baacca9aaf73756a7fad4bfbee2/C of ff0e5baacca9aaf73756a7fad4bfbee2 into 84d548d6bd7e47a9ad1c165323396955(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:26:58,017 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:26:58,017 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2., storeName=ff0e5baacca9aaf73756a7fad4bfbee2/C, priority=13, startTime=1733840817558; duration=0sec 2024-12-10T14:26:58,017 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:58,017 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ff0e5baacca9aaf73756a7fad4bfbee2:C 2024-12-10T14:26:58,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T14:26:58,071 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:26:58,072 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T14:26:58,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:26:58,072 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing ff0e5baacca9aaf73756a7fad4bfbee2 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T14:26:58,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=A 2024-12-10T14:26:58,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:58,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=B 2024-12-10T14:26:58,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:58,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=C 2024-12-10T14:26:58,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:26:58,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101d6eb64cf53f45a6a56aca7cfafe6975_ff0e5baacca9aaf73756a7fad4bfbee2 is 50, key is test_row_0/A:col10/1733840816305/Put/seqid=0 2024-12-10T14:26:58,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742184_1360 (size=12154) 2024-12-10T14:26:58,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T14:26:58,391 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/6d287e678b414efa8897c395b1a26938 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/6d287e678b414efa8897c395b1a26938 2024-12-10T14:26:58,405 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ff0e5baacca9aaf73756a7fad4bfbee2/A of ff0e5baacca9aaf73756a7fad4bfbee2 into 6d287e678b414efa8897c395b1a26938(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:26:58,405 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:26:58,405 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2., storeName=ff0e5baacca9aaf73756a7fad4bfbee2/A, priority=13, startTime=1733840817556; duration=0sec 2024-12-10T14:26:58,405 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:26:58,405 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ff0e5baacca9aaf73756a7fad4bfbee2:A 2024-12-10T14:26:58,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:58,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:26:58,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:58,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840878450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:58,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:58,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840878452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:58,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:58,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840878453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:58,459 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:58,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840878455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:58,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:58,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840878456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:58,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:26:58,489 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101d6eb64cf53f45a6a56aca7cfafe6975_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101d6eb64cf53f45a6a56aca7cfafe6975_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:26:58,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/5ef7511a60214f2b8dd21b91cacc621b, store: [table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:26:58,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/5ef7511a60214f2b8dd21b91cacc621b is 175, key is test_row_0/A:col10/1733840816305/Put/seqid=0 2024-12-10T14:26:58,494 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742185_1361 (size=30955) 2024-12-10T14:26:58,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T14:26:58,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:58,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840878557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:58,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:58,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840878558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:58,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:58,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840878558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:58,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:58,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840878560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:58,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:58,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840878562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:58,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:58,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840878761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:58,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:58,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840878761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:58,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:58,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840878762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:58,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:58,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840878765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:58,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:58,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840878766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:58,895 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/5ef7511a60214f2b8dd21b91cacc621b 2024-12-10T14:26:58,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/67eb8a92ba0e40fd9f61414a763c352c is 50, key is test_row_0/B:col10/1733840816305/Put/seqid=0 2024-12-10T14:26:58,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742186_1362 (size=12001) 2024-12-10T14:26:59,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T14:26:59,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:59,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840879065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:59,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:59,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840879066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:59,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:59,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840879067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:59,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:59,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840879071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:59,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:59,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840879071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:59,314 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/67eb8a92ba0e40fd9f61414a763c352c 2024-12-10T14:26:59,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/20b2dc4af9d54c07b5333809096cf3be is 50, key is test_row_0/C:col10/1733840816305/Put/seqid=0 2024-12-10T14:26:59,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742187_1363 (size=12001) 2024-12-10T14:26:59,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:59,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840879569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:59,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:59,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840879570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:59,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:59,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840879573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:59,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:59,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840879575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:59,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:26:59,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840879577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:26:59,725 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/20b2dc4af9d54c07b5333809096cf3be 2024-12-10T14:26:59,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/5ef7511a60214f2b8dd21b91cacc621b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/5ef7511a60214f2b8dd21b91cacc621b 2024-12-10T14:26:59,733 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/5ef7511a60214f2b8dd21b91cacc621b, entries=150, sequenceid=78, filesize=30.2 K 2024-12-10T14:26:59,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/67eb8a92ba0e40fd9f61414a763c352c as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/67eb8a92ba0e40fd9f61414a763c352c 2024-12-10T14:26:59,737 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/67eb8a92ba0e40fd9f61414a763c352c, entries=150, sequenceid=78, filesize=11.7 K 2024-12-10T14:26:59,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/20b2dc4af9d54c07b5333809096cf3be as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/20b2dc4af9d54c07b5333809096cf3be 2024-12-10T14:26:59,741 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/20b2dc4af9d54c07b5333809096cf3be, entries=150, sequenceid=78, filesize=11.7 K 2024-12-10T14:26:59,742 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for ff0e5baacca9aaf73756a7fad4bfbee2 in 1670ms, sequenceid=78, compaction requested=false 2024-12-10T14:26:59,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:26:59,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:26:59,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-10T14:26:59,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-10T14:26:59,744 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-10T14:26:59,744 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8230 sec 2024-12-10T14:26:59,746 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 1.8270 sec 2024-12-10T14:27:00,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T14:27:00,022 INFO [Thread-1560 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-12-10T14:27:00,024 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:27:00,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-12-10T14:27:00,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-10T14:27:00,025 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:27:00,025 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:27:00,026 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:27:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-10T14:27:00,179 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:00,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-10T14:27:00,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:27:00,179 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing ff0e5baacca9aaf73756a7fad4bfbee2 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-10T14:27:00,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=A 2024-12-10T14:27:00,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:00,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=B 2024-12-10T14:27:00,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:00,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=C 2024-12-10T14:27:00,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:00,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121058b8cd97da5a4538ad34173ff45715a3_ff0e5baacca9aaf73756a7fad4bfbee2 is 50, key is test_row_0/A:col10/1733840818451/Put/seqid=0 2024-12-10T14:27:00,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742188_1364 (size=12154) 2024-12-10T14:27:00,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-10T14:27:00,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:00,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
as already flushing 2024-12-10T14:27:00,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:00,594 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121058b8cd97da5a4538ad34173ff45715a3_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121058b8cd97da5a4538ad34173ff45715a3_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:00,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/bc2e580e6ecf4462a92cc5759bdc1615, store: [table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:00,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/bc2e580e6ecf4462a92cc5759bdc1615 is 175, key is test_row_0/A:col10/1733840818451/Put/seqid=0 2024-12-10T14:27:00,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742189_1365 (size=30955) 2024-12-10T14:27:00,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:00,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840880601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:00,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:00,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840880602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:00,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:00,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840880605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:00,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:00,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840880606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:00,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:00,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840880608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:00,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-10T14:27:00,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:00,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840880709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:00,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:00,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840880709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:00,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:00,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840880711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:00,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:00,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840880715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:00,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:00,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840880717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:00,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:00,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840880914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:00,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:00,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840880915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:00,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:00,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840880915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:00,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:00,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840880920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:00,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:00,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840880921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:01,000 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/bc2e580e6ecf4462a92cc5759bdc1615 2024-12-10T14:27:01,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/8336c71443b5413383ad36d0840f763a is 50, key is test_row_0/B:col10/1733840818451/Put/seqid=0 2024-12-10T14:27:01,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742190_1366 (size=12001) 2024-12-10T14:27:01,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-10T14:27:01,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:01,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840881218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:01,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:01,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840881218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:01,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:01,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840881218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:01,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:01,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840881224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:01,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:01,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840881228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:01,411 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/8336c71443b5413383ad36d0840f763a 2024-12-10T14:27:01,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/3839e4358ef34d4899905b6e587e4bff is 50, key is test_row_0/C:col10/1733840818451/Put/seqid=0 2024-12-10T14:27:01,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742191_1367 (size=12001) 2024-12-10T14:27:01,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:01,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840881726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:01,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:01,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840881726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:01,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:01,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840881727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:01,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:01,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840881731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:01,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:01,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840881733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:01,823 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/3839e4358ef34d4899905b6e587e4bff 2024-12-10T14:27:01,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/bc2e580e6ecf4462a92cc5759bdc1615 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/bc2e580e6ecf4462a92cc5759bdc1615 2024-12-10T14:27:01,832 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/bc2e580e6ecf4462a92cc5759bdc1615, entries=150, sequenceid=94, filesize=30.2 K 2024-12-10T14:27:01,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/8336c71443b5413383ad36d0840f763a as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/8336c71443b5413383ad36d0840f763a 2024-12-10T14:27:01,836 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/8336c71443b5413383ad36d0840f763a, entries=150, sequenceid=94, filesize=11.7 K 2024-12-10T14:27:01,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/3839e4358ef34d4899905b6e587e4bff as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/3839e4358ef34d4899905b6e587e4bff 2024-12-10T14:27:01,841 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/3839e4358ef34d4899905b6e587e4bff, entries=150, sequenceid=94, filesize=11.7 K 2024-12-10T14:27:01,842 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for ff0e5baacca9aaf73756a7fad4bfbee2 in 1663ms, sequenceid=94, compaction requested=true 2024-12-10T14:27:01,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:01,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:27:01,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-12-10T14:27:01,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-12-10T14:27:01,844 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-12-10T14:27:01,844 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8170 sec 2024-12-10T14:27:01,845 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 1.8200 sec 2024-12-10T14:27:02,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-10T14:27:02,129 INFO [Thread-1560 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-12-10T14:27:02,130 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:27:02,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-10T14:27:02,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-10T14:27:02,131 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:27:02,132 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:27:02,132 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:27:02,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-10T14:27:02,283 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:02,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-10T14:27:02,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:27:02,284 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing ff0e5baacca9aaf73756a7fad4bfbee2 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-10T14:27:02,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=A 2024-12-10T14:27:02,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:02,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=B 2024-12-10T14:27:02,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:02,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=C 2024-12-10T14:27:02,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:02,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101ca2324d935f4b619d432d6617b38149_ff0e5baacca9aaf73756a7fad4bfbee2 is 50, key is test_row_0/A:col10/1733840820605/Put/seqid=0 2024-12-10T14:27:02,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742192_1368 (size=12154) 2024-12-10T14:27:02,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-10T14:27:02,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:02,702 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101ca2324d935f4b619d432d6617b38149_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101ca2324d935f4b619d432d6617b38149_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:02,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/88cf6161d29d4ae790f1fddaa77c1dd2, store: [table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:02,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/88cf6161d29d4ae790f1fddaa77c1dd2 is 175, key is test_row_0/A:col10/1733840820605/Put/seqid=0 2024-12-10T14:27:02,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742193_1369 (size=30955) 2024-12-10T14:27:02,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-10T14:27:02,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:02,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:02,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:02,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840882778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:02,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:02,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840882778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:02,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:02,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840882778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:02,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:02,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840882778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:02,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:02,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840882779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:02,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:02,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840882885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:02,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:02,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840882886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:02,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:02,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840882886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:02,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:02,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840882886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:02,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:02,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840882886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:03,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:03,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840883091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:03,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:03,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840883091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:03,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:03,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840883091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:03,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:03,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840883092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:03,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:03,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840883093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:03,108 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=115, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/88cf6161d29d4ae790f1fddaa77c1dd2 2024-12-10T14:27:03,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/27b8973bbce9429c8a4324faa3abe284 is 50, key is test_row_0/B:col10/1733840820605/Put/seqid=0 2024-12-10T14:27:03,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742194_1370 (size=12001) 2024-12-10T14:27:03,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-10T14:27:03,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:03,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840883396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:03,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:03,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840883396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:03,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:03,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840883397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:03,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:03,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840883397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:03,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:03,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840883397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:03,520 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/27b8973bbce9429c8a4324faa3abe284 2024-12-10T14:27:03,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/c0c147feb2d049db9f44db83f3069612 is 50, key is test_row_0/C:col10/1733840820605/Put/seqid=0 2024-12-10T14:27:03,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742195_1371 (size=12001) 2024-12-10T14:27:03,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:03,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840883901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:03,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:03,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840883901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:03,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:03,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840883903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:03,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:03,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840883903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:03,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:03,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840883904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:03,931 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/c0c147feb2d049db9f44db83f3069612 2024-12-10T14:27:03,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/88cf6161d29d4ae790f1fddaa77c1dd2 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/88cf6161d29d4ae790f1fddaa77c1dd2 2024-12-10T14:27:03,938 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/88cf6161d29d4ae790f1fddaa77c1dd2, entries=150, sequenceid=115, filesize=30.2 K 2024-12-10T14:27:03,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/27b8973bbce9429c8a4324faa3abe284 as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/27b8973bbce9429c8a4324faa3abe284 2024-12-10T14:27:03,942 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/27b8973bbce9429c8a4324faa3abe284, entries=150, sequenceid=115, filesize=11.7 K 2024-12-10T14:27:03,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/c0c147feb2d049db9f44db83f3069612 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/c0c147feb2d049db9f44db83f3069612 2024-12-10T14:27:03,946 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/c0c147feb2d049db9f44db83f3069612, entries=150, sequenceid=115, filesize=11.7 K 2024-12-10T14:27:03,947 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for ff0e5baacca9aaf73756a7fad4bfbee2 in 1663ms, sequenceid=115, compaction requested=true 2024-12-10T14:27:03,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:03,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
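The repeated RegionTooBusyException entries above come from HRegion.checkResources(): writes to region ff0e5baacca9aaf73756a7fad4bfbee2 are rejected while its memstore sits above the blocking limit (512.0 K here), and they only go through again once the flush recorded above ("Finished flush of dataSize ~120.76 KB ... in 1663ms") releases that space. In HBase the blocking threshold is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below only illustrates that relationship; the concrete values in it (a 128 K flush size and the default multiplier of 4) are assumptions chosen to reproduce the 512 K figure, not settings read from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch only: shows how the "Over memstore limit" threshold is
// derived. The flush size below is an assumption picked to yield 512 K; it is
// not taken from the TestAcidGuarantees configuration.
public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed 128 K flush threshold
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // 4 is the usual default

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier; // 512 K with the values above

    System.out.println("Puts are rejected with RegionTooBusyException once the region's memstore"
        + " exceeds " + blockingLimit + " bytes, until an in-flight flush completes.");
  }
}

Raising the multiplier (or the flush size) widens the window before writers are blocked, at the cost of holding more data in memstore heap.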
2024-12-10T14:27:03,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-10T14:27:03,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-10T14:27:03,949 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-10T14:27:03,949 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8160 sec 2024-12-10T14:27:03,950 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.8190 sec 2024-12-10T14:27:04,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-10T14:27:04,235 INFO [Thread-1560 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-10T14:27:04,236 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:27:04,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-12-10T14:27:04,237 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:27:04,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T14:27:04,238 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:27:04,238 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:27:04,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T14:27:04,389 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:04,390 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-10T14:27:04,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
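Each rejected Mutate appears twice above: a WARN carrying the server-side stack trace, and a DEBUG ipc.CallRunner line recording the callId, connection and deadline against which the exception was returned. The stock HBase client treats RegionTooBusyException as retryable and backs off on its own, which is why the TestAcidGuarantees writers keep resubmitting until the flush frees memstore space. The sketch below is a hypothetical, stripped-down writer that makes such a retry loop explicit; it is not the test's own code, the table, family and row names are placeholders mirrored from the log, and it assumes client-side retries are dialed down (e.g. hbase.client.retries.number=1) so the exception actually surfaces to the caller instead of being absorbed by the client's own retry machinery.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical writer with an explicit backoff loop around Table.put().
// Names mirror the log for readability only.
public class RetryOnRegionTooBusy {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 1); // assumption: let the exception reach the caller
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) { // may arrive wrapped, depending on the call path
          if (attempt >= 10) {
            throw e; // give up after a bounded number of attempts
          }
          Thread.sleep(backoffMs); // wait for the in-flight flush to free memstore space
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}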
2024-12-10T14:27:04,390 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing ff0e5baacca9aaf73756a7fad4bfbee2 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-10T14:27:04,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=A 2024-12-10T14:27:04,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:04,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=B 2024-12-10T14:27:04,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:04,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=C 2024-12-10T14:27:04,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:04,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121085700cb6f5d14bc6afb4eb4ba6566d19_ff0e5baacca9aaf73756a7fad4bfbee2 is 50, key is test_row_0/A:col10/1733840822777/Put/seqid=0 2024-12-10T14:27:04,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742196_1372 (size=12204) 2024-12-10T14:27:04,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T14:27:04,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:04,805 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121085700cb6f5d14bc6afb4eb4ba6566d19_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121085700cb6f5d14bc6afb4eb4ba6566d19_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:04,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/aef2ed92d0ae4ef9913584b2008a629d, store: [table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:04,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/aef2ed92d0ae4ef9913584b2008a629d is 175, key is test_row_0/A:col10/1733840822777/Put/seqid=0 2024-12-10T14:27:04,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742197_1373 (size=31005) 2024-12-10T14:27:04,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T14:27:04,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:04,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:04,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:04,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840884933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:04,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:04,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840884933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:04,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:04,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840884934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:04,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:04,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840884940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:04,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:04,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840884940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:05,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:05,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840885044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:05,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:05,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840885044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:05,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:05,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840885044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:05,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:05,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840885044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:05,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:05,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840885051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:05,210 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=130, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/aef2ed92d0ae4ef9913584b2008a629d 2024-12-10T14:27:05,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/ae61515a49a1476b926bd203ce47a948 is 50, key is test_row_0/B:col10/1733840822777/Put/seqid=0 2024-12-10T14:27:05,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742198_1374 (size=12051) 2024-12-10T14:27:05,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:05,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840885248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:05,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:05,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840885249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:05,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:05,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840885249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:05,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:05,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840885249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:05,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:05,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840885257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:05,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T14:27:05,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:05,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840885554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:05,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:05,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840885554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:05,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:05,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840885555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:05,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:05,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840885556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:05,567 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:05,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840885563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:05,621 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/ae61515a49a1476b926bd203ce47a948 2024-12-10T14:27:05,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/3b0299df9a374b53a025256b53ee9f6c is 50, key is test_row_0/C:col10/1733840822777/Put/seqid=0 2024-12-10T14:27:05,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742199_1375 (size=12051) 2024-12-10T14:27:06,031 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/3b0299df9a374b53a025256b53ee9f6c 2024-12-10T14:27:06,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/aef2ed92d0ae4ef9913584b2008a629d as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/aef2ed92d0ae4ef9913584b2008a629d 2024-12-10T14:27:06,039 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/aef2ed92d0ae4ef9913584b2008a629d, entries=150, sequenceid=130, filesize=30.3 K 2024-12-10T14:27:06,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/ae61515a49a1476b926bd203ce47a948 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/ae61515a49a1476b926bd203ce47a948 2024-12-10T14:27:06,042 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/ae61515a49a1476b926bd203ce47a948, entries=150, sequenceid=130, filesize=11.8 K 2024-12-10T14:27:06,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/3b0299df9a374b53a025256b53ee9f6c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/3b0299df9a374b53a025256b53ee9f6c 2024-12-10T14:27:06,046 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/3b0299df9a374b53a025256b53ee9f6c, entries=150, sequenceid=130, filesize=11.8 K 2024-12-10T14:27:06,047 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for ff0e5baacca9aaf73756a7fad4bfbee2 in 1657ms, sequenceid=130, compaction requested=true 2024-12-10T14:27:06,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:06,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:27:06,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-12-10T14:27:06,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-12-10T14:27:06,049 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-10T14:27:06,049 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8100 sec 2024-12-10T14:27:06,050 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.8130 sec 2024-12-10T14:27:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:06,063 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ff0e5baacca9aaf73756a7fad4bfbee2 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T14:27:06,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=A 2024-12-10T14:27:06,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:06,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=B 2024-12-10T14:27:06,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:06,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=C 2024-12-10T14:27:06,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:06,069 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ed8c3d83c76545c1b6e027b0b49caba0_ff0e5baacca9aaf73756a7fad4bfbee2 is 50, key is test_row_0/A:col10/1733840824939/Put/seqid=0 2024-12-10T14:27:06,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742200_1376 (size=14794) 2024-12-10T14:27:06,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840886080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840886080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840886081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840886082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840886082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840886188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840886189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840886189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840886189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840886190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T14:27:06,341 INFO [Thread-1560 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-10T14:27:06,342 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:27:06,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-12-10T14:27:06,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-10T14:27:06,344 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:27:06,344 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:27:06,344 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:27:06,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840886395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840886395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840886395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840886395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840886396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-10T14:27:06,475 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:06,479 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ed8c3d83c76545c1b6e027b0b49caba0_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ed8c3d83c76545c1b6e027b0b49caba0_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:06,480 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/c45c41ec2c7b44e482189a1c3a93c639, store: [table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:06,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/c45c41ec2c7b44e482189a1c3a93c639 is 175, key is test_row_0/A:col10/1733840824939/Put/seqid=0 2024-12-10T14:27:06,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742201_1377 (size=39749) 2024-12-10T14:27:06,496 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:06,496 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T14:27:06,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:27:06,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:06,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:06,496 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:06,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:06,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:06,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-10T14:27:06,648 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:06,649 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T14:27:06,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:27:06,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:06,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:06,649 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:06,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
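The pid=113 failures above follow the usual pattern for an explicit flush request racing an in-progress flush: FlushRegionCallable reports "Unable to complete flush" because the region is "already flushing", and the master keeps re-dispatching the procedure until the MemStoreFlusher finishes. A hedged sketch of the client-side request that drives this kind of procedure is an Admin-triggered flush; the table name is taken from the log, everything else is an illustrative assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the cluster to flush every region of the table; on the server side
      // this is carried out by flush procedures such as the FlushRegionCallable
      // runs logged above, which are simply retried while a flush is already
      // in progress.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}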
2024-12-10T14:27:06,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:06,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840886701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840886702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840886702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840886702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:06,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840886703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:06,801 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:06,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T14:27:06,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:06,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:06,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:06,802 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:06,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:06,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:06,885 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=153, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/c45c41ec2c7b44e482189a1c3a93c639 2024-12-10T14:27:06,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/5334089a7e13423a87dd1d8f4849d5a1 is 50, key is test_row_0/B:col10/1733840824939/Put/seqid=0 2024-12-10T14:27:06,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742202_1378 (size=12151) 2024-12-10T14:27:06,897 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/5334089a7e13423a87dd1d8f4849d5a1 2024-12-10T14:27:06,903 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/9056ebb34c6f4a5f901b8c7857eb4c44 is 50, key is test_row_0/C:col10/1733840824939/Put/seqid=0 2024-12-10T14:27:06,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742203_1379 (size=12151) 2024-12-10T14:27:06,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=112 2024-12-10T14:27:06,954 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:06,954 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T14:27:06,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:06,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:06,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:06,955 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:06,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
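On the client side, each RegionTooBusyException logged in this stretch is returned to the caller of the Mutate RPC; a writer is expected to back off and retry until the pending flush drains the memstore. A small sketch of that pattern follows, assuming the busy exception reaches the caller directly (with default settings the HBase client may retry internally or wrap it, e.g. in a RetriesExhaustedException). Row, family, and qualifier are taken from the log; the loop and values are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Keep the client's own retries short so the busy condition surfaces here
    // (depending on version/settings it may still arrive wrapped).
    conf.setInt("hbase.client.retries.number", 1);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          // Rejected with RegionTooBusyException while the memstore is over
          // its blocking limit, as seen repeatedly in this log.
          table.put(put);
          return;
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs); // back off so the flush can drain the memstore
          backoffMs *= 2;
        }
      }
      throw new java.io.IOException("region still too busy after retries");
    }
  }
}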
2024-12-10T14:27:06,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:07,106 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:07,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T14:27:07,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:07,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
as already flushing 2024-12-10T14:27:07,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:07,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:07,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:07,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:07,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:07,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840887208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:07,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:07,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840887208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:07,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:07,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840887208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:07,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:07,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840887211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:07,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:07,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840887212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:07,259 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:07,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T14:27:07,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:07,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:07,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:07,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:07,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:07,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:07,306 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/9056ebb34c6f4a5f901b8c7857eb4c44 2024-12-10T14:27:07,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/c45c41ec2c7b44e482189a1c3a93c639 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/c45c41ec2c7b44e482189a1c3a93c639 2024-12-10T14:27:07,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/c45c41ec2c7b44e482189a1c3a93c639, entries=200, sequenceid=153, filesize=38.8 K 2024-12-10T14:27:07,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/5334089a7e13423a87dd1d8f4849d5a1 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/5334089a7e13423a87dd1d8f4849d5a1 2024-12-10T14:27:07,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/5334089a7e13423a87dd1d8f4849d5a1, entries=150, 
sequenceid=153, filesize=11.9 K 2024-12-10T14:27:07,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/9056ebb34c6f4a5f901b8c7857eb4c44 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/9056ebb34c6f4a5f901b8c7857eb4c44 2024-12-10T14:27:07,321 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/9056ebb34c6f4a5f901b8c7857eb4c44, entries=150, sequenceid=153, filesize=11.9 K 2024-12-10T14:27:07,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for ff0e5baacca9aaf73756a7fad4bfbee2 in 1260ms, sequenceid=153, compaction requested=true 2024-12-10T14:27:07,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:07,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ff0e5baacca9aaf73756a7fad4bfbee2:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:27:07,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:07,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ff0e5baacca9aaf73756a7fad4bfbee2:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:27:07,322 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-12-10T14:27:07,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:07,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ff0e5baacca9aaf73756a7fad4bfbee2:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:27:07,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:07,322 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-12-10T14:27:07,324 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72309 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-12-10T14:27:07,324 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 194677 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-12-10T14:27:07,324 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] 
regionserver.HStore(1540): ff0e5baacca9aaf73756a7fad4bfbee2/B is initiating minor compaction (all files) 2024-12-10T14:27:07,324 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): ff0e5baacca9aaf73756a7fad4bfbee2/A is initiating minor compaction (all files) 2024-12-10T14:27:07,324 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ff0e5baacca9aaf73756a7fad4bfbee2/A in TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:07,324 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ff0e5baacca9aaf73756a7fad4bfbee2/B in TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:07,324 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/6d287e678b414efa8897c395b1a26938, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/5ef7511a60214f2b8dd21b91cacc621b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/bc2e580e6ecf4462a92cc5759bdc1615, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/88cf6161d29d4ae790f1fddaa77c1dd2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/aef2ed92d0ae4ef9913584b2008a629d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/c45c41ec2c7b44e482189a1c3a93c639] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp, totalSize=190.1 K 2024-12-10T14:27:07,324 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/2a5625659ef04dcab997dcba33dbe910, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/67eb8a92ba0e40fd9f61414a763c352c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/8336c71443b5413383ad36d0840f763a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/27b8973bbce9429c8a4324faa3abe284, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/ae61515a49a1476b926bd203ce47a948, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/5334089a7e13423a87dd1d8f4849d5a1] into 
tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp, totalSize=70.6 K 2024-12-10T14:27:07,324 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=10 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:07,324 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/6d287e678b414efa8897c395b1a26938, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/5ef7511a60214f2b8dd21b91cacc621b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/bc2e580e6ecf4462a92cc5759bdc1615, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/88cf6161d29d4ae790f1fddaa77c1dd2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/aef2ed92d0ae4ef9913584b2008a629d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/c45c41ec2c7b44e482189a1c3a93c639] 2024-12-10T14:27:07,325 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a5625659ef04dcab997dcba33dbe910, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733840815970 2024-12-10T14:27:07,325 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d287e678b414efa8897c395b1a26938, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733840815970 2024-12-10T14:27:07,325 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 67eb8a92ba0e40fd9f61414a763c352c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733840816303 2024-12-10T14:27:07,325 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ef7511a60214f2b8dd21b91cacc621b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733840816303 2024-12-10T14:27:07,325 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 8336c71443b5413383ad36d0840f763a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733840818445 2024-12-10T14:27:07,325 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc2e580e6ecf4462a92cc5759bdc1615, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733840818445 2024-12-10T14:27:07,325 DEBUG 
[RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 27b8973bbce9429c8a4324faa3abe284, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733840820601 2024-12-10T14:27:07,325 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 88cf6161d29d4ae790f1fddaa77c1dd2, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733840820601 2024-12-10T14:27:07,326 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting ae61515a49a1476b926bd203ce47a948, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733840822748 2024-12-10T14:27:07,326 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting aef2ed92d0ae4ef9913584b2008a629d, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733840822748 2024-12-10T14:27:07,326 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 5334089a7e13423a87dd1d8f4849d5a1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733840824932 2024-12-10T14:27:07,326 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting c45c41ec2c7b44e482189a1c3a93c639, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733840824932 2024-12-10T14:27:07,335 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:07,336 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210734454b3528e48658e8a1c7cf9c2b143_ff0e5baacca9aaf73756a7fad4bfbee2 store=[table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:07,337 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ff0e5baacca9aaf73756a7fad4bfbee2#B#compaction#315 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:07,338 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/e1ec388d4a2c445fb2a07c5b725964d9 is 50, key is test_row_0/B:col10/1733840824939/Put/seqid=0 2024-12-10T14:27:07,339 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210734454b3528e48658e8a1c7cf9c2b143_ff0e5baacca9aaf73756a7fad4bfbee2, store=[table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:07,339 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210734454b3528e48658e8a1c7cf9c2b143_ff0e5baacca9aaf73756a7fad4bfbee2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:07,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742204_1380 (size=12459) 2024-12-10T14:27:07,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742205_1381 (size=4469) 2024-12-10T14:27:07,345 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ff0e5baacca9aaf73756a7fad4bfbee2#A#compaction#316 average throughput is 2.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:07,345 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/b94b88a8f8f14463addc039b815948a8 is 175, key is test_row_0/A:col10/1733840824939/Put/seqid=0 2024-12-10T14:27:07,348 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/e1ec388d4a2c445fb2a07c5b725964d9 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/e1ec388d4a2c445fb2a07c5b725964d9 2024-12-10T14:27:07,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742206_1382 (size=31413) 2024-12-10T14:27:07,353 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in ff0e5baacca9aaf73756a7fad4bfbee2/B of ff0e5baacca9aaf73756a7fad4bfbee2 into e1ec388d4a2c445fb2a07c5b725964d9(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
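The entry above records the six flush files of store B being compacted into a single 12.2 K file by the region server's own CompactSplit thread. Compactions of this kind can also be requested explicitly from a client; the sketch below is illustrative only (it is not part of this test run) and assumes an HBase client configuration is available on the classpath.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Queue a minor compaction on every region of the table.
      admin.compact(table);
      // Or force a major compaction, which rewrites all store files in each store
      // and drops deleted/expired cells:
      // admin.majorCompact(table);
    }
  }
}
```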
2024-12-10T14:27:07,353 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:07,353 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2., storeName=ff0e5baacca9aaf73756a7fad4bfbee2/B, priority=10, startTime=1733840827322; duration=0sec 2024-12-10T14:27:07,353 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:07,353 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ff0e5baacca9aaf73756a7fad4bfbee2:B 2024-12-10T14:27:07,353 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-12-10T14:27:07,355 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72309 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-12-10T14:27:07,356 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): ff0e5baacca9aaf73756a7fad4bfbee2/C is initiating minor compaction (all files) 2024-12-10T14:27:07,356 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ff0e5baacca9aaf73756a7fad4bfbee2/C in TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:07,356 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/84d548d6bd7e47a9ad1c165323396955, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/20b2dc4af9d54c07b5333809096cf3be, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/3839e4358ef34d4899905b6e587e4bff, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/c0c147feb2d049db9f44db83f3069612, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/3b0299df9a374b53a025256b53ee9f6c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/9056ebb34c6f4a5f901b8c7857eb4c44] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp, totalSize=70.6 K 2024-12-10T14:27:07,356 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 84d548d6bd7e47a9ad1c165323396955, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733840815970 2024-12-10T14:27:07,356 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): 
Compacting 20b2dc4af9d54c07b5333809096cf3be, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733840816303 2024-12-10T14:27:07,356 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 3839e4358ef34d4899905b6e587e4bff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733840818445 2024-12-10T14:27:07,357 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting c0c147feb2d049db9f44db83f3069612, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733840820601 2024-12-10T14:27:07,357 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b0299df9a374b53a025256b53ee9f6c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733840822748 2024-12-10T14:27:07,357 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 9056ebb34c6f4a5f901b8c7857eb4c44, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733840824932 2024-12-10T14:27:07,368 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ff0e5baacca9aaf73756a7fad4bfbee2#C#compaction#317 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:07,369 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/36996be386204b83954fb3380e528ba2 is 50, key is test_row_0/C:col10/1733840824939/Put/seqid=0 2024-12-10T14:27:07,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742207_1383 (size=12459) 2024-12-10T14:27:07,377 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/36996be386204b83954fb3380e528ba2 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/36996be386204b83954fb3380e528ba2 2024-12-10T14:27:07,380 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in ff0e5baacca9aaf73756a7fad4bfbee2/C of ff0e5baacca9aaf73756a7fad4bfbee2 into 36996be386204b83954fb3380e528ba2(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
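The entries that follow show the master driving an explicit flush: a FlushRegionCallable is dispatched to the region server (pid=113) as a sub-procedure of a table-level FlushTableProcedure, and the HBaseAdmin client later reports the FLUSH operation on TestAcidGuarantees (procId 112) as completed. A flush like this is typically requested through the Admin API; the following is a minimal sketch under the assumption of a default client configuration, not a reproduction of the test's own code.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestTableFlush {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master runs a
      // FlushTableProcedure and dispatches per-region flush procedures to the
      // region servers, as seen in the surrounding log entries.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```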
2024-12-10T14:27:07,380 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:07,380 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2., storeName=ff0e5baacca9aaf73756a7fad4bfbee2/C, priority=10, startTime=1733840827322; duration=0sec 2024-12-10T14:27:07,380 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:07,381 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ff0e5baacca9aaf73756a7fad4bfbee2:C 2024-12-10T14:27:07,412 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:07,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T14:27:07,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:07,413 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing ff0e5baacca9aaf73756a7fad4bfbee2 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T14:27:07,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=A 2024-12-10T14:27:07,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:07,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=B 2024-12-10T14:27:07,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:07,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=C 2024-12-10T14:27:07,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:07,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412100abaa28ddedc436fa05dd7181c4f44f7_ff0e5baacca9aaf73756a7fad4bfbee2 is 50, key is test_row_0/A:col10/1733840826081/Put/seqid=0 2024-12-10T14:27:07,424 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742208_1384 (size=12304) 2024-12-10T14:27:07,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:07,428 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412100abaa28ddedc436fa05dd7181c4f44f7_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412100abaa28ddedc436fa05dd7181c4f44f7_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:07,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/bb5074dcc9414289854c295adc5d1f98, store: [table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:07,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/bb5074dcc9414289854c295adc5d1f98 is 175, key is test_row_0/A:col10/1733840826081/Put/seqid=0 2024-12-10T14:27:07,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742209_1385 (size=31105) 2024-12-10T14:27:07,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-10T14:27:07,754 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/b94b88a8f8f14463addc039b815948a8 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/b94b88a8f8f14463addc039b815948a8 2024-12-10T14:27:07,759 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in ff0e5baacca9aaf73756a7fad4bfbee2/A of ff0e5baacca9aaf73756a7fad4bfbee2 into b94b88a8f8f14463addc039b815948a8(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
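Shortly below, the region starts rejecting writes with RegionTooBusyException: "Over memstore limit=512.0 K". That blocking limit is the region flush size multiplied by the memstore block multiplier, and it has evidently been tuned far below the production default of 128 MB for this test. The sketch below only illustrates the two settings involved; the exact values used by TestAcidGuarantees are an assumption here (128 KB with the default multiplier of 4 would yield the 512 K seen in the log), and in normal operation the client's built-in retry/backoff absorbs these transient rejections.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical test-style value: flush each region memstore at 128 KB
    // (the production default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Writers are blocked once the memstore exceeds flush.size * multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    // 128 KB * 4 = 512 KB, matching the "Over memstore limit=512.0 K" warnings below.
    System.out.println("blocking limit = " + (flushSize * multiplier) + " bytes");
  }
}
```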
2024-12-10T14:27:07,760 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:07,760 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2., storeName=ff0e5baacca9aaf73756a7fad4bfbee2/A, priority=10, startTime=1733840827322; duration=0sec 2024-12-10T14:27:07,760 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:07,760 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ff0e5baacca9aaf73756a7fad4bfbee2:A 2024-12-10T14:27:07,834 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=168, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/bb5074dcc9414289854c295adc5d1f98 2024-12-10T14:27:07,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/32614243a19c4787877349cc651efff7 is 50, key is test_row_0/B:col10/1733840826081/Put/seqid=0 2024-12-10T14:27:07,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742210_1386 (size=12151) 2024-12-10T14:27:07,845 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/32614243a19c4787877349cc651efff7 2024-12-10T14:27:07,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/f1cc03232b9b4f3d8c74ec15aaf29225 is 50, key is test_row_0/C:col10/1733840826081/Put/seqid=0 2024-12-10T14:27:07,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742211_1387 (size=12151) 2024-12-10T14:27:08,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
as already flushing 2024-12-10T14:27:08,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:08,259 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/f1cc03232b9b4f3d8c74ec15aaf29225 2024-12-10T14:27:08,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/bb5074dcc9414289854c295adc5d1f98 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/bb5074dcc9414289854c295adc5d1f98 2024-12-10T14:27:08,267 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/bb5074dcc9414289854c295adc5d1f98, entries=150, sequenceid=168, filesize=30.4 K 2024-12-10T14:27:08,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/32614243a19c4787877349cc651efff7 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/32614243a19c4787877349cc651efff7 2024-12-10T14:27:08,271 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/32614243a19c4787877349cc651efff7, entries=150, sequenceid=168, filesize=11.9 K 2024-12-10T14:27:08,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/f1cc03232b9b4f3d8c74ec15aaf29225 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/f1cc03232b9b4f3d8c74ec15aaf29225 2024-12-10T14:27:08,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840888267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,275 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/f1cc03232b9b4f3d8c74ec15aaf29225, entries=150, sequenceid=168, filesize=11.9 K 2024-12-10T14:27:08,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840888267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,276 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ff0e5baacca9aaf73756a7fad4bfbee2 in 862ms, sequenceid=168, compaction requested=false 2024-12-10T14:27:08,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:08,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:08,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-12-10T14:27:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:08,276 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ff0e5baacca9aaf73756a7fad4bfbee2 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T14:27:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-12-10T14:27:08,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=A 2024-12-10T14:27:08,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:08,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=B 2024-12-10T14:27:08,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:08,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=C 2024-12-10T14:27:08,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:08,278 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-10T14:27:08,278 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9330 sec 2024-12-10T14:27:08,279 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.9360 sec 2024-12-10T14:27:08,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210b9dfc2aed6514a069ed507127e06175f_ff0e5baacca9aaf73756a7fad4bfbee2 is 50, key is test_row_0/A:col10/1733840828275/Put/seqid=0 2024-12-10T14:27:08,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742212_1388 (size=17284) 2024-12-10T14:27:08,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840888290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840888295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,300 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840888295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840888375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840888376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840888397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840888398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840888401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-10T14:27:08,447 INFO [Thread-1560 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-12-10T14:27:08,449 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:27:08,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-12-10T14:27:08,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T14:27:08,450 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:27:08,451 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:27:08,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:27:08,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T14:27:08,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840888580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840888581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,602 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:08,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-10T14:27:08,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:08,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:08,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:08,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840888601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,603 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:08,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:08,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:08,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840888601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840888605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,694 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:08,698 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210b9dfc2aed6514a069ed507127e06175f_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210b9dfc2aed6514a069ed507127e06175f_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:08,698 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/b70f17dd3f0748cc8015388bf4312fdf, store: [table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:08,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/b70f17dd3f0748cc8015388bf4312fdf is 175, key is test_row_0/A:col10/1733840828275/Put/seqid=0 2024-12-10T14:27:08,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742213_1389 (size=48389) 2024-12-10T14:27:08,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T14:27:08,755 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:08,755 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-10T14:27:08,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:27:08,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:08,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:08,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:08,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:08,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:08,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840888884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840888886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840888905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,907 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:08,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-10T14:27:08,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:08,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:08,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:08,908 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:08,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:08,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:08,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840888907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:08,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:08,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840888911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:09,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T14:27:09,060 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:09,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-10T14:27:09,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:09,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:09,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:09,060 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:09,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:09,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:09,103 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=193, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/b70f17dd3f0748cc8015388bf4312fdf 2024-12-10T14:27:09,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/2835d810063849118e0c42bd23e73a81 is 50, key is test_row_0/B:col10/1733840828275/Put/seqid=0 2024-12-10T14:27:09,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742214_1390 (size=12151) 2024-12-10T14:27:09,212 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:09,213 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-10T14:27:09,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:09,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:09,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:09,213 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:09,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:09,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:09,365 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:09,366 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-10T14:27:09,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:09,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:09,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:09,366 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:09,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:09,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:09,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:09,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840889387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:09,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:09,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840889390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:09,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:09,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840889410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:09,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:09,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840889416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:09,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:09,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840889419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:09,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/2835d810063849118e0c42bd23e73a81 2024-12-10T14:27:09,518 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:09,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-10T14:27:09,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:09,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:09,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:27:09,519 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:09,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:09,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:09,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/0c2844c1ceae45b2bd773049c3190a4a is 50, key is test_row_0/C:col10/1733840828275/Put/seqid=0 2024-12-10T14:27:09,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742215_1391 (size=12151) 2024-12-10T14:27:09,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T14:27:09,670 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:09,671 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-10T14:27:09,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:09,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:09,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:27:09,671 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:09,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:09,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:09,823 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:09,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-10T14:27:09,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:09,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:09,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:09,824 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:09,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:09,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:09,925 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/0c2844c1ceae45b2bd773049c3190a4a 2024-12-10T14:27:09,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/b70f17dd3f0748cc8015388bf4312fdf as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/b70f17dd3f0748cc8015388bf4312fdf 2024-12-10T14:27:09,932 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/b70f17dd3f0748cc8015388bf4312fdf, entries=250, sequenceid=193, filesize=47.3 K 2024-12-10T14:27:09,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/2835d810063849118e0c42bd23e73a81 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/2835d810063849118e0c42bd23e73a81 2024-12-10T14:27:09,936 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/2835d810063849118e0c42bd23e73a81, entries=150, 
sequenceid=193, filesize=11.9 K 2024-12-10T14:27:09,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/0c2844c1ceae45b2bd773049c3190a4a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/0c2844c1ceae45b2bd773049c3190a4a 2024-12-10T14:27:09,940 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/0c2844c1ceae45b2bd773049c3190a4a, entries=150, sequenceid=193, filesize=11.9 K 2024-12-10T14:27:09,940 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for ff0e5baacca9aaf73756a7fad4bfbee2 in 1664ms, sequenceid=193, compaction requested=true 2024-12-10T14:27:09,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:09,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ff0e5baacca9aaf73756a7fad4bfbee2:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:27:09,941 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:09,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:09,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ff0e5baacca9aaf73756a7fad4bfbee2:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:27:09,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:09,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ff0e5baacca9aaf73756a7fad4bfbee2:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:27:09,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:09,941 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:09,942 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:09,942 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110907 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:09,942 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] 
regionserver.HStore(1540): ff0e5baacca9aaf73756a7fad4bfbee2/B is initiating minor compaction (all files) 2024-12-10T14:27:09,942 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): ff0e5baacca9aaf73756a7fad4bfbee2/A is initiating minor compaction (all files) 2024-12-10T14:27:09,942 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ff0e5baacca9aaf73756a7fad4bfbee2/B in TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:09,942 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ff0e5baacca9aaf73756a7fad4bfbee2/A in TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:09,942 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/e1ec388d4a2c445fb2a07c5b725964d9, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/32614243a19c4787877349cc651efff7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/2835d810063849118e0c42bd23e73a81] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp, totalSize=35.9 K 2024-12-10T14:27:09,942 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/b94b88a8f8f14463addc039b815948a8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/bb5074dcc9414289854c295adc5d1f98, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/b70f17dd3f0748cc8015388bf4312fdf] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp, totalSize=108.3 K 2024-12-10T14:27:09,942 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:09,942 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/b94b88a8f8f14463addc039b815948a8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/bb5074dcc9414289854c295adc5d1f98, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/b70f17dd3f0748cc8015388bf4312fdf] 2024-12-10T14:27:09,942 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting e1ec388d4a2c445fb2a07c5b725964d9, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733840824932 2024-12-10T14:27:09,942 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b94b88a8f8f14463addc039b815948a8, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733840824932 2024-12-10T14:27:09,943 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 32614243a19c4787877349cc651efff7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733840826064 2024-12-10T14:27:09,943 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb5074dcc9414289854c295adc5d1f98, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733840826064 2024-12-10T14:27:09,943 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b70f17dd3f0748cc8015388bf4312fdf, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733840828265 2024-12-10T14:27:09,943 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 2835d810063849118e0c42bd23e73a81, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733840828265 2024-12-10T14:27:09,948 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:09,949 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ff0e5baacca9aaf73756a7fad4bfbee2#B#compaction#324 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:09,949 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/959bc0429ff046a3a5c46f3d0a1b2de3 is 50, key is test_row_0/B:col10/1733840828275/Put/seqid=0 2024-12-10T14:27:09,951 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210ba29753f335a4902a09639a5a13deac9_ff0e5baacca9aaf73756a7fad4bfbee2 store=[table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:09,953 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210ba29753f335a4902a09639a5a13deac9_ff0e5baacca9aaf73756a7fad4bfbee2, store=[table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:09,953 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ba29753f335a4902a09639a5a13deac9_ff0e5baacca9aaf73756a7fad4bfbee2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:09,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742216_1392 (size=12561) 2024-12-10T14:27:09,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742217_1393 (size=4469) 2024-12-10T14:27:09,976 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:09,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-10T14:27:09,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:27:09,976 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing ff0e5baacca9aaf73756a7fad4bfbee2 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T14:27:09,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=A 2024-12-10T14:27:09,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:09,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=B 2024-12-10T14:27:09,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:09,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=C 2024-12-10T14:27:09,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:09,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210c87e7829bfb64159b37dfc69c2cbdf8d_ff0e5baacca9aaf73756a7fad4bfbee2 is 50, key is test_row_0/A:col10/1733840828288/Put/seqid=0 2024-12-10T14:27:09,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742218_1394 (size=12304) 2024-12-10T14:27:10,365 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/959bc0429ff046a3a5c46f3d0a1b2de3 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/959bc0429ff046a3a5c46f3d0a1b2de3 2024-12-10T14:27:10,368 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ff0e5baacca9aaf73756a7fad4bfbee2#A#compaction#325 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:10,368 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/98f6a9cc47e649fab4e7adbdf7b78638 is 175, key is test_row_0/A:col10/1733840828275/Put/seqid=0 2024-12-10T14:27:10,370 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ff0e5baacca9aaf73756a7fad4bfbee2/B of ff0e5baacca9aaf73756a7fad4bfbee2 into 959bc0429ff046a3a5c46f3d0a1b2de3(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:27:10,370 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:10,370 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2., storeName=ff0e5baacca9aaf73756a7fad4bfbee2/B, priority=13, startTime=1733840829941; duration=0sec 2024-12-10T14:27:10,370 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:10,370 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ff0e5baacca9aaf73756a7fad4bfbee2:B 2024-12-10T14:27:10,370 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:10,371 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:10,371 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): ff0e5baacca9aaf73756a7fad4bfbee2/C is initiating minor compaction (all files) 2024-12-10T14:27:10,371 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ff0e5baacca9aaf73756a7fad4bfbee2/C in TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:27:10,371 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/36996be386204b83954fb3380e528ba2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/f1cc03232b9b4f3d8c74ec15aaf29225, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/0c2844c1ceae45b2bd773049c3190a4a] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp, totalSize=35.9 K 2024-12-10T14:27:10,372 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 36996be386204b83954fb3380e528ba2, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733840824932 2024-12-10T14:27:10,372 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting f1cc03232b9b4f3d8c74ec15aaf29225, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733840826064 2024-12-10T14:27:10,372 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c2844c1ceae45b2bd773049c3190a4a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733840828265 2024-12-10T14:27:10,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742219_1395 (size=31515) 2024-12-10T14:27:10,379 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/98f6a9cc47e649fab4e7adbdf7b78638 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/98f6a9cc47e649fab4e7adbdf7b78638 2024-12-10T14:27:10,379 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ff0e5baacca9aaf73756a7fad4bfbee2#C#compaction#327 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:10,380 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/da31547ac57e4538aa9b9959f88cfd70 is 50, key is test_row_0/C:col10/1733840828275/Put/seqid=0 2024-12-10T14:27:10,384 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ff0e5baacca9aaf73756a7fad4bfbee2/A of ff0e5baacca9aaf73756a7fad4bfbee2 into 98f6a9cc47e649fab4e7adbdf7b78638(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:27:10,384 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:10,384 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2., storeName=ff0e5baacca9aaf73756a7fad4bfbee2/A, priority=13, startTime=1733840829941; duration=0sec 2024-12-10T14:27:10,384 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:10,384 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ff0e5baacca9aaf73756a7fad4bfbee2:A 2024-12-10T14:27:10,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742220_1396 (size=12561) 2024-12-10T14:27:10,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:10,396 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210c87e7829bfb64159b37dfc69c2cbdf8d_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c87e7829bfb64159b37dfc69c2cbdf8d_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:10,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/d8e0d7477c8244198c389d680fcd6129, store: [table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:10,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/d8e0d7477c8244198c389d680fcd6129 is 175, key is test_row_0/A:col10/1733840828288/Put/seqid=0 2024-12-10T14:27:10,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:10,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
as already flushing 2024-12-10T14:27:10,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742221_1397 (size=31105) 2024-12-10T14:27:10,427 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=205, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/d8e0d7477c8244198c389d680fcd6129 2024-12-10T14:27:10,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/eaa3695d95dc4745b1cf6d884f899536 is 50, key is test_row_0/B:col10/1733840828288/Put/seqid=0 2024-12-10T14:27:10,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742222_1398 (size=12151) 2024-12-10T14:27:10,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:10,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840890443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:10,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:10,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840890442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:10,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:10,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840890454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:10,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:10,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840890454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:10,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:10,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840890454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:10,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T14:27:10,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:10,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840890555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:10,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:10,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840890555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:10,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:10,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840890563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:10,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:10,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840890564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:10,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:10,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840890564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:10,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:10,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840890760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:10,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:10,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840890761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:10,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:10,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840890771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:10,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:10,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840890771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:10,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:10,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840890772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:10,792 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/da31547ac57e4538aa9b9959f88cfd70 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/da31547ac57e4538aa9b9959f88cfd70 2024-12-10T14:27:10,796 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ff0e5baacca9aaf73756a7fad4bfbee2/C of ff0e5baacca9aaf73756a7fad4bfbee2 into da31547ac57e4538aa9b9959f88cfd70(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
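The RegionTooBusyException warnings interleaved with this flush come from HRegion.checkResources rejecting new mutations while the region's memstore is over its 512.0 K blocking limit; callers are expected to back off and retry once the in-flight flush drains the memstore. Below is a minimal client-side sketch of such a retry loop, assuming the table, row, and column names that appear in these entries; with default client settings the HBase retry machinery handles this transparently inside Table.put, so the explicit catch only matters when client retries are effectively disabled (e.g. hbase.client.retries.number=1), and it is illustrative rather than the test's actual code path.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    // Rejected with RegionTooBusyException while the memstore is over its blocking limit.
                    table.put(put);
                    break;
                } catch (RegionTooBusyException busy) {
                    if (attempt >= 10) {
                        throw busy; // give up after a bounded number of attempts
                    }
                    Thread.sleep(backoffMs); // let the flush free memstore space before retrying
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}

Exponential backoff capped at a few seconds mirrors what the server is signalling here: the region is not failed, only temporarily unable to absorb more writes until the flush at sequenceid=205 completes.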
2024-12-10T14:27:10,796 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:10,796 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2., storeName=ff0e5baacca9aaf73756a7fad4bfbee2/C, priority=13, startTime=1733840829941; duration=0sec 2024-12-10T14:27:10,796 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:10,796 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ff0e5baacca9aaf73756a7fad4bfbee2:C 2024-12-10T14:27:10,843 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/eaa3695d95dc4745b1cf6d884f899536 2024-12-10T14:27:10,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/989d3647cb774043bfc1810ceeb9355f is 50, key is test_row_0/C:col10/1733840828288/Put/seqid=0 2024-12-10T14:27:10,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742223_1399 (size=12151) 2024-12-10T14:27:11,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840891066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840891070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840891077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840891077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840891078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,254 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/989d3647cb774043bfc1810ceeb9355f 2024-12-10T14:27:11,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/d8e0d7477c8244198c389d680fcd6129 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/d8e0d7477c8244198c389d680fcd6129 2024-12-10T14:27:11,262 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/d8e0d7477c8244198c389d680fcd6129, entries=150, sequenceid=205, filesize=30.4 K 2024-12-10T14:27:11,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/eaa3695d95dc4745b1cf6d884f899536 as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/eaa3695d95dc4745b1cf6d884f899536 2024-12-10T14:27:11,266 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/eaa3695d95dc4745b1cf6d884f899536, entries=150, sequenceid=205, filesize=11.9 K 2024-12-10T14:27:11,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/989d3647cb774043bfc1810ceeb9355f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/989d3647cb774043bfc1810ceeb9355f 2024-12-10T14:27:11,269 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/989d3647cb774043bfc1810ceeb9355f, entries=150, sequenceid=205, filesize=11.9 K 2024-12-10T14:27:11,270 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for ff0e5baacca9aaf73756a7fad4bfbee2 in 1294ms, sequenceid=205, compaction requested=false 2024-12-10T14:27:11,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:11,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
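The 512.0 K blocking limit behind the RegionTooBusyException storm above is far below HBase's defaults, so it is presumably tightened by the test to force frequent flushes; in a normal deployment the same behaviour is governed by hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, with updates blocked once a region's memstore reaches flush size times multiplier. A minimal configuration sketch with illustrative values (not taken from this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Flush a region's memstore to an HFile once it reaches 128 MB.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

        // Block further updates (RegionTooBusyException) once the memstore
        // reaches flush.size * multiplier, i.e. 4 * 128 MB = 512 MB here.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("updates blocked above " + (flushSize * multiplier) + " bytes per region");
    }
}

The 512.0 K limit seen in this excerpt would correspond to, for example, a 128 K flush size with the default multiplier of 4, though the test's exact settings are not shown here.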
2024-12-10T14:27:11,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-12-10T14:27:11,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-12-10T14:27:11,272 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-12-10T14:27:11,272 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8200 sec 2024-12-10T14:27:11,274 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 2.8240 sec 2024-12-10T14:27:11,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:11,577 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ff0e5baacca9aaf73756a7fad4bfbee2 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T14:27:11,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=A 2024-12-10T14:27:11,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:11,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=B 2024-12-10T14:27:11,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:11,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=C 2024-12-10T14:27:11,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:11,584 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121067f71653ed904d7b8d371c5e5455a680_ff0e5baacca9aaf73756a7fad4bfbee2 is 50, key is test_row_0/A:col10/1733840830450/Put/seqid=0 2024-12-10T14:27:11,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742224_1400 (size=14794) 2024-12-10T14:27:11,589 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:11,592 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121067f71653ed904d7b8d371c5e5455a680_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121067f71653ed904d7b8d371c5e5455a680_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:11,593 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/416b7a49864b4cf68d8f88eb85941764, store: [table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:11,593 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/416b7a49864b4cf68d8f88eb85941764 is 175, key is test_row_0/A:col10/1733840830450/Put/seqid=0 2024-12-10T14:27:11,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840891587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840891590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840891591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742225_1401 (size=39749) 2024-12-10T14:27:11,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840891593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840891594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840891696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840891696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840891700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840891700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840891900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840891901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840891905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:11,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840891905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:11,997 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=234, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/416b7a49864b4cf68d8f88eb85941764 2024-12-10T14:27:12,004 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/a248628e99fb4572a2c7b721916ec8d9 is 50, key is test_row_0/B:col10/1733840830450/Put/seqid=0 2024-12-10T14:27:12,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742226_1402 (size=12151) 2024-12-10T14:27:12,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:12,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840892205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:12,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:12,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840892207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:12,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:12,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840892209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:12,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:12,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840892210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:12,411 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/a248628e99fb4572a2c7b721916ec8d9 2024-12-10T14:27:12,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/a56527e9342b41468f6f9eaf12931590 is 50, key is test_row_0/C:col10/1733840830450/Put/seqid=0 2024-12-10T14:27:12,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742227_1403 (size=12151) 2024-12-10T14:27:12,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T14:27:12,555 INFO [Thread-1560 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-10T14:27:12,556 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:27:12,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-12-10T14:27:12,558 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:27:12,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-10T14:27:12,559 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:27:12,559 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-12-10T14:27:12,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:12,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58266 deadline: 1733840892599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:12,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-10T14:27:12,711 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:12,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-10T14:27:12,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:12,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:12,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840892710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:12,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:12,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:12,712 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:12,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:12,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:12,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:12,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840892715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:12,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:12,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840892715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:12,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:12,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840892718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:12,839 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/a56527e9342b41468f6f9eaf12931590 2024-12-10T14:27:12,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/416b7a49864b4cf68d8f88eb85941764 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/416b7a49864b4cf68d8f88eb85941764 2024-12-10T14:27:12,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/416b7a49864b4cf68d8f88eb85941764, entries=200, sequenceid=234, filesize=38.8 K 2024-12-10T14:27:12,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/a248628e99fb4572a2c7b721916ec8d9 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/a248628e99fb4572a2c7b721916ec8d9 2024-12-10T14:27:12,852 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/a248628e99fb4572a2c7b721916ec8d9, entries=150, sequenceid=234, filesize=11.9 K 2024-12-10T14:27:12,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/a56527e9342b41468f6f9eaf12931590 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/a56527e9342b41468f6f9eaf12931590 2024-12-10T14:27:12,857 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/a56527e9342b41468f6f9eaf12931590, entries=150, sequenceid=234, filesize=11.9 K 2024-12-10T14:27:12,857 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ff0e5baacca9aaf73756a7fad4bfbee2 in 1280ms, sequenceid=234, compaction requested=true 2024-12-10T14:27:12,857 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:12,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ff0e5baacca9aaf73756a7fad4bfbee2:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:27:12,858 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:12,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:12,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ff0e5baacca9aaf73756a7fad4bfbee2:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:27:12,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:12,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ff0e5baacca9aaf73756a7fad4bfbee2:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:27:12,858 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:12,858 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:12,859 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:12,859 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): ff0e5baacca9aaf73756a7fad4bfbee2/A is initiating minor compaction (all files) 2024-12-10T14:27:12,859 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ff0e5baacca9aaf73756a7fad4bfbee2/A in TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:27:12,859 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/98f6a9cc47e649fab4e7adbdf7b78638, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/d8e0d7477c8244198c389d680fcd6129, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/416b7a49864b4cf68d8f88eb85941764] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp, totalSize=100.0 K 2024-12-10T14:27:12,859 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:12,859 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/98f6a9cc47e649fab4e7adbdf7b78638, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/d8e0d7477c8244198c389d680fcd6129, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/416b7a49864b4cf68d8f88eb85941764] 2024-12-10T14:27:12,860 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:12,860 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98f6a9cc47e649fab4e7adbdf7b78638, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733840828265 2024-12-10T14:27:12,860 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): ff0e5baacca9aaf73756a7fad4bfbee2/B is initiating minor compaction (all files) 2024-12-10T14:27:12,860 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ff0e5baacca9aaf73756a7fad4bfbee2/B in TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:27:12,860 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/959bc0429ff046a3a5c46f3d0a1b2de3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/eaa3695d95dc4745b1cf6d884f899536, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/a248628e99fb4572a2c7b721916ec8d9] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp, totalSize=36.0 K 2024-12-10T14:27:12,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-10T14:27:12,861 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 959bc0429ff046a3a5c46f3d0a1b2de3, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733840828265 2024-12-10T14:27:12,861 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8e0d7477c8244198c389d680fcd6129, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1733840828288 2024-12-10T14:27:12,861 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting eaa3695d95dc4745b1cf6d884f899536, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1733840828288 2024-12-10T14:27:12,861 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 416b7a49864b4cf68d8f88eb85941764, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733840830439 2024-12-10T14:27:12,861 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting a248628e99fb4572a2c7b721916ec8d9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733840830450 2024-12-10T14:27:12,864 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:12,864 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-10T14:27:12,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:27:12,865 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing ff0e5baacca9aaf73756a7fad4bfbee2 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-10T14:27:12,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=A 2024-12-10T14:27:12,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:12,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=B 2024-12-10T14:27:12,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:12,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=C 2024-12-10T14:27:12,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:12,870 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ff0e5baacca9aaf73756a7fad4bfbee2#B#compaction#333 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:12,870 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/206053bf91bb4f33b50c282fb706b44c is 50, key is test_row_0/B:col10/1733840830450/Put/seqid=0 2024-12-10T14:27:12,876 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:12,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412102531711c16a748179842df47618ca960_ff0e5baacca9aaf73756a7fad4bfbee2 is 50, key is test_row_0/A:col10/1733840831590/Put/seqid=0 2024-12-10T14:27:12,881 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121085d68b83767e46b6b384607960975a5e_ff0e5baacca9aaf73756a7fad4bfbee2 store=[table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:12,883 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121085d68b83767e46b6b384607960975a5e_ff0e5baacca9aaf73756a7fad4bfbee2, store=[table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:12,883 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121085d68b83767e46b6b384607960975a5e_ff0e5baacca9aaf73756a7fad4bfbee2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:12,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742229_1405 (size=12304) 2024-12-10T14:27:12,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742228_1404 (size=12663) 2024-12-10T14:27:12,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742230_1406 (size=4469) 2024-12-10T14:27:12,891 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ff0e5baacca9aaf73756a7fad4bfbee2#A#compaction#334 average throughput is 1.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:12,892 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/05f4f884417c4c018c78ca8beef0dd47 is 175, key is test_row_0/A:col10/1733840830450/Put/seqid=0 2024-12-10T14:27:12,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742231_1407 (size=31617) 2024-12-10T14:27:12,900 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/05f4f884417c4c018c78ca8beef0dd47 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/05f4f884417c4c018c78ca8beef0dd47 2024-12-10T14:27:12,904 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ff0e5baacca9aaf73756a7fad4bfbee2/A of ff0e5baacca9aaf73756a7fad4bfbee2 into 05f4f884417c4c018c78ca8beef0dd47(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:27:12,905 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:12,905 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2., storeName=ff0e5baacca9aaf73756a7fad4bfbee2/A, priority=13, startTime=1733840832858; duration=0sec 2024-12-10T14:27:12,905 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:12,905 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ff0e5baacca9aaf73756a7fad4bfbee2:A 2024-12-10T14:27:12,905 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:12,905 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:12,905 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): ff0e5baacca9aaf73756a7fad4bfbee2/C is initiating minor compaction (all files) 2024-12-10T14:27:12,905 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ff0e5baacca9aaf73756a7fad4bfbee2/C in TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:27:12,906 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/da31547ac57e4538aa9b9959f88cfd70, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/989d3647cb774043bfc1810ceeb9355f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/a56527e9342b41468f6f9eaf12931590] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp, totalSize=36.0 K 2024-12-10T14:27:12,906 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting da31547ac57e4538aa9b9959f88cfd70, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733840828265 2024-12-10T14:27:12,906 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 989d3647cb774043bfc1810ceeb9355f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1733840828288 2024-12-10T14:27:12,907 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting a56527e9342b41468f6f9eaf12931590, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733840830450 2024-12-10T14:27:12,915 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ff0e5baacca9aaf73756a7fad4bfbee2#C#compaction#336 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:12,915 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/a96998f0b70d4115857ee65ce2b75ae0 is 50, key is test_row_0/C:col10/1733840830450/Put/seqid=0 2024-12-10T14:27:12,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742232_1408 (size=12663) 2024-12-10T14:27:13,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-10T14:27:13,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:13,293 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412102531711c16a748179842df47618ca960_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102531711c16a748179842df47618ca960_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:13,294 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/206053bf91bb4f33b50c282fb706b44c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/206053bf91bb4f33b50c282fb706b44c 2024-12-10T14:27:13,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/a4bee8038cdd47bcb94f1147a8d8a2bb, store: [table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:13,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/a4bee8038cdd47bcb94f1147a8d8a2bb is 175, key is test_row_0/A:col10/1733840831590/Put/seqid=0 2024-12-10T14:27:13,303 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ff0e5baacca9aaf73756a7fad4bfbee2/B of ff0e5baacca9aaf73756a7fad4bfbee2 into 206053bf91bb4f33b50c282fb706b44c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:27:13,303 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:13,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742233_1409 (size=31105) 2024-12-10T14:27:13,303 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2., storeName=ff0e5baacca9aaf73756a7fad4bfbee2/B, priority=13, startTime=1733840832858; duration=0sec 2024-12-10T14:27:13,303 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:13,303 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ff0e5baacca9aaf73756a7fad4bfbee2:B 2024-12-10T14:27:13,304 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=244, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/a4bee8038cdd47bcb94f1147a8d8a2bb 2024-12-10T14:27:13,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/e8370c0984e542b2a33fbcbdebb6ecba is 50, key is test_row_0/B:col10/1733840831590/Put/seqid=0 2024-12-10T14:27:13,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742234_1410 (size=12151) 2024-12-10T14:27:13,323 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/a96998f0b70d4115857ee65ce2b75ae0 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/a96998f0b70d4115857ee65ce2b75ae0 2024-12-10T14:27:13,327 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ff0e5baacca9aaf73756a7fad4bfbee2/C of ff0e5baacca9aaf73756a7fad4bfbee2 into a96998f0b70d4115857ee65ce2b75ae0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:27:13,327 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:13,327 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2., storeName=ff0e5baacca9aaf73756a7fad4bfbee2/C, priority=13, startTime=1733840832858; duration=0sec 2024-12-10T14:27:13,327 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:13,327 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ff0e5baacca9aaf73756a7fad4bfbee2:C 2024-12-10T14:27:13,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-10T14:27:13,718 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/e8370c0984e542b2a33fbcbdebb6ecba 2024-12-10T14:27:13,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:13,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. as already flushing 2024-12-10T14:27:13,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/0eade79e517a4c84a336855bef1d0d82 is 50, key is test_row_0/C:col10/1733840831590/Put/seqid=0 2024-12-10T14:27:13,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742235_1411 (size=12151) 2024-12-10T14:27:13,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:13,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840893749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:13,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:13,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:13,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840893754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:13,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840893753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:13,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:13,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840893755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:13,813 DEBUG [Thread-1565 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68035c67 to 127.0.0.1:58494 2024-12-10T14:27:13,813 DEBUG [Thread-1565 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:13,814 DEBUG [Thread-1563 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b308f62 to 127.0.0.1:58494 2024-12-10T14:27:13,814 DEBUG [Thread-1563 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:13,817 DEBUG [Thread-1561 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75e4d3d0 to 127.0.0.1:58494 2024-12-10T14:27:13,817 DEBUG [Thread-1561 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:13,817 DEBUG [Thread-1569 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x59bd764a to 127.0.0.1:58494 2024-12-10T14:27:13,817 DEBUG [Thread-1569 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:13,817 DEBUG [Thread-1567 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3eab689a to 127.0.0.1:58494 2024-12-10T14:27:13,817 DEBUG [Thread-1567 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:13,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:13,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840893856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:13,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:13,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:13,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:13,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840893862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:13,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840893862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:13,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840893862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:14,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:14,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58214 deadline: 1733840894059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:14,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:14,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:14,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58234 deadline: 1733840894063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:14,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58270 deadline: 1733840894063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:14,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:14,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58200 deadline: 1733840894064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:14,130 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/0eade79e517a4c84a336855bef1d0d82 2024-12-10T14:27:14,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/a4bee8038cdd47bcb94f1147a8d8a2bb as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/a4bee8038cdd47bcb94f1147a8d8a2bb 2024-12-10T14:27:14,136 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/a4bee8038cdd47bcb94f1147a8d8a2bb, entries=150, sequenceid=244, filesize=30.4 K 2024-12-10T14:27:14,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/e8370c0984e542b2a33fbcbdebb6ecba as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/e8370c0984e542b2a33fbcbdebb6ecba 2024-12-10T14:27:14,139 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/e8370c0984e542b2a33fbcbdebb6ecba, entries=150, sequenceid=244, filesize=11.9 K 2024-12-10T14:27:14,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/0eade79e517a4c84a336855bef1d0d82 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/0eade79e517a4c84a336855bef1d0d82 2024-12-10T14:27:14,142 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/0eade79e517a4c84a336855bef1d0d82, entries=150, sequenceid=244, filesize=11.9 K 2024-12-10T14:27:14,143 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for ff0e5baacca9aaf73756a7fad4bfbee2 in 1277ms, sequenceid=244, compaction requested=false 2024-12-10T14:27:14,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:14,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:14,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-12-10T14:27:14,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-12-10T14:27:14,144 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-10T14:27:14,144 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5840 sec 2024-12-10T14:27:14,145 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.5890 sec 2024-12-10T14:27:14,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:14,364 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ff0e5baacca9aaf73756a7fad4bfbee2 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-10T14:27:14,364 DEBUG [Thread-1552 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c336ea4 to 127.0.0.1:58494 2024-12-10T14:27:14,364 DEBUG [Thread-1552 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:14,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=A 2024-12-10T14:27:14,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:14,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=B 2024-12-10T14:27:14,364 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:14,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=C 2024-12-10T14:27:14,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:14,366 DEBUG [Thread-1556 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x319559be to 127.0.0.1:58494 2024-12-10T14:27:14,366 DEBUG [Thread-1556 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:14,367 DEBUG [Thread-1554 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f94d721 to 127.0.0.1:58494 2024-12-10T14:27:14,367 DEBUG [Thread-1554 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:14,367 DEBUG [Thread-1550 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3dd5b441 to 127.0.0.1:58494 2024-12-10T14:27:14,367 DEBUG [Thread-1550 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:14,370 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121077a662bd7746469a96ab98df85018e79_ff0e5baacca9aaf73756a7fad4bfbee2 is 50, key is test_row_0/A:col10/1733840833752/Put/seqid=0 2024-12-10T14:27:14,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742236_1412 (size=12454) 2024-12-10T14:27:14,606 DEBUG [Thread-1558 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c907e21 to 127.0.0.1:58494 2024-12-10T14:27:14,606 DEBUG [Thread-1558 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:14,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-10T14:27:14,663 INFO [Thread-1560 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 46 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 48 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 46 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 37 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2852 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8556 rows 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2843 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8529 rows 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2853 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8559 rows 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2846 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8538 rows 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2831 2024-12-10T14:27:14,663 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8493 rows 2024-12-10T14:27:14,663 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T14:27:14,663 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ec15031 to 127.0.0.1:58494 2024-12-10T14:27:14,663 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:14,665 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-10T14:27:14,666 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-10T14:27:14,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:14,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T14:27:14,669 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840834669"}]},"ts":"1733840834669"} 2024-12-10T14:27:14,670 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-10T14:27:14,672 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-10T14:27:14,673 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T14:27:14,674 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ff0e5baacca9aaf73756a7fad4bfbee2, UNASSIGN}] 2024-12-10T14:27:14,674 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=119, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ff0e5baacca9aaf73756a7fad4bfbee2, UNASSIGN 2024-12-10T14:27:14,675 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=ff0e5baacca9aaf73756a7fad4bfbee2, regionState=CLOSING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:27:14,675 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:27:14,675 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; CloseRegionProcedure ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:27:14,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T14:27:14,774 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:14,777 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121077a662bd7746469a96ab98df85018e79_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121077a662bd7746469a96ab98df85018e79_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:14,777 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/9e6846015e704633b053da2b8e6f7647, store: [table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:14,778 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/9e6846015e704633b053da2b8e6f7647 is 175, key is test_row_0/A:col10/1733840833752/Put/seqid=0 2024-12-10T14:27:14,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742237_1413 (size=31255) 2024-12-10T14:27:14,826 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:14,827 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] handler.UnassignRegionHandler(124): Close ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:14,827 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 
2024-12-10T14:27:14,827 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1681): Closing ff0e5baacca9aaf73756a7fad4bfbee2, disabling compactions & flushes 2024-12-10T14:27:14,827 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:14,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T14:27:15,181 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=274, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/9e6846015e704633b053da2b8e6f7647 2024-12-10T14:27:15,187 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/57b7db9ec95c4bc68c39ae644c8d7bb5 is 50, key is test_row_0/B:col10/1733840833752/Put/seqid=0 2024-12-10T14:27:15,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742238_1414 (size=12301) 2024-12-10T14:27:15,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T14:27:15,590 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/57b7db9ec95c4bc68c39ae644c8d7bb5 2024-12-10T14:27:15,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/a8259dc2907b42a6b7f5f6c83f1a66dd is 50, key is test_row_0/C:col10/1733840833752/Put/seqid=0 2024-12-10T14:27:15,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742239_1415 (size=12301) 2024-12-10T14:27:15,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T14:27:16,000 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/a8259dc2907b42a6b7f5f6c83f1a66dd 2024-12-10T14:27:16,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/9e6846015e704633b053da2b8e6f7647 as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/9e6846015e704633b053da2b8e6f7647 2024-12-10T14:27:16,005 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/9e6846015e704633b053da2b8e6f7647, entries=150, sequenceid=274, filesize=30.5 K 2024-12-10T14:27:16,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/57b7db9ec95c4bc68c39ae644c8d7bb5 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/57b7db9ec95c4bc68c39ae644c8d7bb5 2024-12-10T14:27:16,009 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/57b7db9ec95c4bc68c39ae644c8d7bb5, entries=150, sequenceid=274, filesize=12.0 K 2024-12-10T14:27:16,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/a8259dc2907b42a6b7f5f6c83f1a66dd as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/a8259dc2907b42a6b7f5f6c83f1a66dd 2024-12-10T14:27:16,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/a8259dc2907b42a6b7f5f6c83f1a66dd, entries=150, sequenceid=274, filesize=12.0 K 2024-12-10T14:27:16,012 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=26.84 KB/27480 for ff0e5baacca9aaf73756a7fad4bfbee2 in 1648ms, sequenceid=274, compaction requested=true 2024-12-10T14:27:16,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:16,012 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:16,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ff0e5baacca9aaf73756a7fad4bfbee2:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:27:16,012 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
2024-12-10T14:27:16,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:16,012 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. after waiting 0 ms 2024-12-10T14:27:16,012 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. because compaction request was cancelled 2024-12-10T14:27:16,012 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:16,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ff0e5baacca9aaf73756a7fad4bfbee2:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:27:16,012 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ff0e5baacca9aaf73756a7fad4bfbee2:A 2024-12-10T14:27:16,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:16,012 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. because compaction request was cancelled 2024-12-10T14:27:16,012 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ff0e5baacca9aaf73756a7fad4bfbee2:B 2024-12-10T14:27:16,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ff0e5baacca9aaf73756a7fad4bfbee2:C, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:27:16,012 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 
because compaction request was cancelled 2024-12-10T14:27:16,012 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(2837): Flushing ff0e5baacca9aaf73756a7fad4bfbee2 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-10T14:27:16,012 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ff0e5baacca9aaf73756a7fad4bfbee2:C 2024-12-10T14:27:16,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:16,013 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=A 2024-12-10T14:27:16,013 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:16,013 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=B 2024-12-10T14:27:16,013 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:16,013 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ff0e5baacca9aaf73756a7fad4bfbee2, store=C 2024-12-10T14:27:16,013 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:16,017 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210fda79c5cb28049fab091db77b931b0ca_ff0e5baacca9aaf73756a7fad4bfbee2 is 50, key is test_row_1/A:col10/1733840834605/Put/seqid=0 2024-12-10T14:27:16,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742240_1416 (size=9914) 2024-12-10T14:27:16,177 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
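[Editor's note, not part of the captured log] The entries above record server-side memstore flushes of the three column families (A, B, C) of region ff0e5baacca9aaf73756a7fad4bfbee2, triggered by the MemStoreFlusher and by the region close; family A additionally goes through the MOB flush path. For readers tracing these messages, a comparable flush can also be requested from a client through the public Admin API. The sketch below is illustrative only and is not the test's actual code; only the table name "TestAcidGuarantees" comes from the log, everything else is assumed.

// Illustrative sketch, assuming a client configuration is available on the classpath.
// Requesting a flush this way produces server-side "Flushing ... column families" and
// "Finished flush" messages similar to the ones recorded above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region servers to flush all memstores of the table.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}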
2024-12-10T14:27:16,420 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:16,423 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210fda79c5cb28049fab091db77b931b0ca_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fda79c5cb28049fab091db77b931b0ca_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:16,424 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/ce81196f2f414b9cab501d4a4ad140fb, store: [table=TestAcidGuarantees family=A region=ff0e5baacca9aaf73756a7fad4bfbee2] 2024-12-10T14:27:16,424 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/ce81196f2f414b9cab501d4a4ad140fb is 175, key is test_row_1/A:col10/1733840834605/Put/seqid=0 2024-12-10T14:27:16,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742241_1417 (size=22561) 2024-12-10T14:27:16,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T14:27:16,828 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=281, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/ce81196f2f414b9cab501d4a4ad140fb 2024-12-10T14:27:16,834 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/60a5dbb09e594c8ba4c12c76bebb5477 is 50, key is test_row_1/B:col10/1733840834605/Put/seqid=0 2024-12-10T14:27:16,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742242_1418 (size=9857) 2024-12-10T14:27:17,237 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=281 (bloomFilter=true), 
to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/60a5dbb09e594c8ba4c12c76bebb5477 2024-12-10T14:27:17,243 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/03b1ab02757047be92c57bf6dcc390af is 50, key is test_row_1/C:col10/1733840834605/Put/seqid=0 2024-12-10T14:27:17,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742243_1419 (size=9857) 2024-12-10T14:27:17,646 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=281 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/03b1ab02757047be92c57bf6dcc390af 2024-12-10T14:27:17,650 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/A/ce81196f2f414b9cab501d4a4ad140fb as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/ce81196f2f414b9cab501d4a4ad140fb 2024-12-10T14:27:17,652 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/ce81196f2f414b9cab501d4a4ad140fb, entries=100, sequenceid=281, filesize=22.0 K 2024-12-10T14:27:17,653 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/B/60a5dbb09e594c8ba4c12c76bebb5477 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/60a5dbb09e594c8ba4c12c76bebb5477 2024-12-10T14:27:17,656 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/60a5dbb09e594c8ba4c12c76bebb5477, entries=100, sequenceid=281, filesize=9.6 K 2024-12-10T14:27:17,656 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/.tmp/C/03b1ab02757047be92c57bf6dcc390af as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/03b1ab02757047be92c57bf6dcc390af 2024-12-10T14:27:17,659 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/03b1ab02757047be92c57bf6dcc390af, entries=100, sequenceid=281, filesize=9.6 K 2024-12-10T14:27:17,660 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for ff0e5baacca9aaf73756a7fad4bfbee2 in 1647ms, sequenceid=281, compaction requested=true 2024-12-10T14:27:17,660 DEBUG [StoreCloser-TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/99ae69adccfc4aa6a9e83f06098dfadd, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/5beb3d89899b47438c9c3fafed78ae35, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/f489bdfd1efb4e4fb0aa1c45b48c55cc, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/6d287e678b414efa8897c395b1a26938, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/5ef7511a60214f2b8dd21b91cacc621b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/bc2e580e6ecf4462a92cc5759bdc1615, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/88cf6161d29d4ae790f1fddaa77c1dd2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/aef2ed92d0ae4ef9913584b2008a629d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/c45c41ec2c7b44e482189a1c3a93c639, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/b94b88a8f8f14463addc039b815948a8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/bb5074dcc9414289854c295adc5d1f98, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/b70f17dd3f0748cc8015388bf4312fdf, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/98f6a9cc47e649fab4e7adbdf7b78638, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/d8e0d7477c8244198c389d680fcd6129, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/416b7a49864b4cf68d8f88eb85941764] to archive 2024-12-10T14:27:17,661 DEBUG [StoreCloser-TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T14:27:17,662 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/f489bdfd1efb4e4fb0aa1c45b48c55cc to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/f489bdfd1efb4e4fb0aa1c45b48c55cc 2024-12-10T14:27:17,662 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/99ae69adccfc4aa6a9e83f06098dfadd to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/99ae69adccfc4aa6a9e83f06098dfadd 2024-12-10T14:27:17,662 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/5ef7511a60214f2b8dd21b91cacc621b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/5ef7511a60214f2b8dd21b91cacc621b 2024-12-10T14:27:17,662 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/88cf6161d29d4ae790f1fddaa77c1dd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/88cf6161d29d4ae790f1fddaa77c1dd2 2024-12-10T14:27:17,662 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/6d287e678b414efa8897c395b1a26938 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/6d287e678b414efa8897c395b1a26938 2024-12-10T14:27:17,663 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/bc2e580e6ecf4462a92cc5759bdc1615 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/bc2e580e6ecf4462a92cc5759bdc1615 2024-12-10T14:27:17,663 DEBUG [HFileArchiver-15 {}] 
backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/5beb3d89899b47438c9c3fafed78ae35 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/5beb3d89899b47438c9c3fafed78ae35 2024-12-10T14:27:17,663 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/aef2ed92d0ae4ef9913584b2008a629d to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/aef2ed92d0ae4ef9913584b2008a629d 2024-12-10T14:27:17,664 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/c45c41ec2c7b44e482189a1c3a93c639 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/c45c41ec2c7b44e482189a1c3a93c639 2024-12-10T14:27:17,664 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/bb5074dcc9414289854c295adc5d1f98 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/bb5074dcc9414289854c295adc5d1f98 2024-12-10T14:27:17,664 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/b70f17dd3f0748cc8015388bf4312fdf to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/b70f17dd3f0748cc8015388bf4312fdf 2024-12-10T14:27:17,664 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/b94b88a8f8f14463addc039b815948a8 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/b94b88a8f8f14463addc039b815948a8 2024-12-10T14:27:17,664 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/d8e0d7477c8244198c389d680fcd6129 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/d8e0d7477c8244198c389d680fcd6129 2024-12-10T14:27:17,664 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/416b7a49864b4cf68d8f88eb85941764 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/416b7a49864b4cf68d8f88eb85941764 2024-12-10T14:27:17,664 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/98f6a9cc47e649fab4e7adbdf7b78638 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/98f6a9cc47e649fab4e7adbdf7b78638 2024-12-10T14:27:17,665 DEBUG [StoreCloser-TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/b020a0daca464b8dae97a63001173745, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/a9586a8ed17c464fb6ad6889de946a70, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/2a5625659ef04dcab997dcba33dbe910, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/7bcd24468f0d4131b1d9bfa65e763b03, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/67eb8a92ba0e40fd9f61414a763c352c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/8336c71443b5413383ad36d0840f763a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/27b8973bbce9429c8a4324faa3abe284, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/ae61515a49a1476b926bd203ce47a948, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/e1ec388d4a2c445fb2a07c5b725964d9, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/5334089a7e13423a87dd1d8f4849d5a1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/32614243a19c4787877349cc651efff7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/959bc0429ff046a3a5c46f3d0a1b2de3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/2835d810063849118e0c42bd23e73a81, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/eaa3695d95dc4745b1cf6d884f899536, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/a248628e99fb4572a2c7b721916ec8d9] to archive 2024-12-10T14:27:17,666 DEBUG [StoreCloser-TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T14:27:17,668 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/2a5625659ef04dcab997dcba33dbe910 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/2a5625659ef04dcab997dcba33dbe910 2024-12-10T14:27:17,668 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/b020a0daca464b8dae97a63001173745 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/b020a0daca464b8dae97a63001173745 2024-12-10T14:27:17,668 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/a9586a8ed17c464fb6ad6889de946a70 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/a9586a8ed17c464fb6ad6889de946a70 2024-12-10T14:27:17,668 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/7bcd24468f0d4131b1d9bfa65e763b03 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/7bcd24468f0d4131b1d9bfa65e763b03 2024-12-10T14:27:17,668 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/27b8973bbce9429c8a4324faa3abe284 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/27b8973bbce9429c8a4324faa3abe284 2024-12-10T14:27:17,668 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/8336c71443b5413383ad36d0840f763a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/8336c71443b5413383ad36d0840f763a 2024-12-10T14:27:17,668 DEBUG [HFileArchiver-16 {}] 
backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/ae61515a49a1476b926bd203ce47a948 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/ae61515a49a1476b926bd203ce47a948 2024-12-10T14:27:17,668 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/67eb8a92ba0e40fd9f61414a763c352c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/67eb8a92ba0e40fd9f61414a763c352c 2024-12-10T14:27:17,669 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/e1ec388d4a2c445fb2a07c5b725964d9 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/e1ec388d4a2c445fb2a07c5b725964d9 2024-12-10T14:27:17,669 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/5334089a7e13423a87dd1d8f4849d5a1 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/5334089a7e13423a87dd1d8f4849d5a1 2024-12-10T14:27:17,669 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/2835d810063849118e0c42bd23e73a81 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/2835d810063849118e0c42bd23e73a81 2024-12-10T14:27:17,669 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/959bc0429ff046a3a5c46f3d0a1b2de3 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/959bc0429ff046a3a5c46f3d0a1b2de3 2024-12-10T14:27:17,670 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/32614243a19c4787877349cc651efff7 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/32614243a19c4787877349cc651efff7 2024-12-10T14:27:17,670 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/a248628e99fb4572a2c7b721916ec8d9 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/a248628e99fb4572a2c7b721916ec8d9 2024-12-10T14:27:17,670 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/eaa3695d95dc4745b1cf6d884f899536 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/eaa3695d95dc4745b1cf6d884f899536 2024-12-10T14:27:17,670 DEBUG [StoreCloser-TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/f4566ea8e194443d8afaa5661cb1523f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/cfd908e470bd4d4eb6235be9e5d4ed55, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/84d548d6bd7e47a9ad1c165323396955, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/79b0063ff70c438eb4b64bed1e9522fe, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/20b2dc4af9d54c07b5333809096cf3be, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/3839e4358ef34d4899905b6e587e4bff, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/c0c147feb2d049db9f44db83f3069612, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/3b0299df9a374b53a025256b53ee9f6c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/36996be386204b83954fb3380e528ba2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/9056ebb34c6f4a5f901b8c7857eb4c44, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/f1cc03232b9b4f3d8c74ec15aaf29225, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/da31547ac57e4538aa9b9959f88cfd70, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/0c2844c1ceae45b2bd773049c3190a4a, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/989d3647cb774043bfc1810ceeb9355f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/a56527e9342b41468f6f9eaf12931590] to archive 2024-12-10T14:27:17,671 DEBUG [StoreCloser-TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T14:27:17,673 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/f4566ea8e194443d8afaa5661cb1523f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/f4566ea8e194443d8afaa5661cb1523f 2024-12-10T14:27:17,673 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/84d548d6bd7e47a9ad1c165323396955 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/84d548d6bd7e47a9ad1c165323396955 2024-12-10T14:27:17,673 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/3b0299df9a374b53a025256b53ee9f6c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/3b0299df9a374b53a025256b53ee9f6c 2024-12-10T14:27:17,673 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/3839e4358ef34d4899905b6e587e4bff to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/3839e4358ef34d4899905b6e587e4bff 2024-12-10T14:27:17,673 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/20b2dc4af9d54c07b5333809096cf3be to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/20b2dc4af9d54c07b5333809096cf3be 2024-12-10T14:27:17,673 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/79b0063ff70c438eb4b64bed1e9522fe to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/79b0063ff70c438eb4b64bed1e9522fe 2024-12-10T14:27:17,673 DEBUG [HFileArchiver-9 {}] 
backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/cfd908e470bd4d4eb6235be9e5d4ed55 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/cfd908e470bd4d4eb6235be9e5d4ed55 2024-12-10T14:27:17,673 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/c0c147feb2d049db9f44db83f3069612 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/c0c147feb2d049db9f44db83f3069612 2024-12-10T14:27:17,674 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/36996be386204b83954fb3380e528ba2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/36996be386204b83954fb3380e528ba2 2024-12-10T14:27:17,674 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/da31547ac57e4538aa9b9959f88cfd70 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/da31547ac57e4538aa9b9959f88cfd70 2024-12-10T14:27:17,675 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/9056ebb34c6f4a5f901b8c7857eb4c44 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/9056ebb34c6f4a5f901b8c7857eb4c44 2024-12-10T14:27:17,675 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/989d3647cb774043bfc1810ceeb9355f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/989d3647cb774043bfc1810ceeb9355f 2024-12-10T14:27:17,675 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/a56527e9342b41468f6f9eaf12931590 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/a56527e9342b41468f6f9eaf12931590 2024-12-10T14:27:17,675 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/0c2844c1ceae45b2bd773049c3190a4a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/0c2844c1ceae45b2bd773049c3190a4a 2024-12-10T14:27:17,675 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/f1cc03232b9b4f3d8c74ec15aaf29225 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/f1cc03232b9b4f3d8c74ec15aaf29225 2024-12-10T14:27:17,678 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/recovered.edits/284.seqid, newMaxSeqId=284, maxSeqId=4 2024-12-10T14:27:17,678 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2. 2024-12-10T14:27:17,678 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1635): Region close journal for ff0e5baacca9aaf73756a7fad4bfbee2: 2024-12-10T14:27:17,679 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] handler.UnassignRegionHandler(170): Closed ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:17,680 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=ff0e5baacca9aaf73756a7fad4bfbee2, regionState=CLOSED 2024-12-10T14:27:17,681 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-10T14:27:17,681 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; CloseRegionProcedure ff0e5baacca9aaf73756a7fad4bfbee2, server=db1d50717577,46699,1733840717757 in 3.0050 sec 2024-12-10T14:27:17,682 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=119 2024-12-10T14:27:17,682 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=119, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ff0e5baacca9aaf73756a7fad4bfbee2, UNASSIGN in 3.0070 sec 2024-12-10T14:27:17,684 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-10T14:27:17,684 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 3.0100 sec 2024-12-10T14:27:17,684 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840837684"}]},"ts":"1733840837684"} 2024-12-10T14:27:17,685 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T14:27:17,688 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set 
TestAcidGuarantees to state=DISABLED 2024-12-10T14:27:17,689 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 3.0220 sec 2024-12-10T14:27:18,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T14:27:18,773 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-10T14:27:18,773 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T14:27:18,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:18,774 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=122, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:18,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-10T14:27:18,775 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=122, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:18,776 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,777 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/recovered.edits] 2024-12-10T14:27:18,780 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/9e6846015e704633b053da2b8e6f7647 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/9e6846015e704633b053da2b8e6f7647 2024-12-10T14:27:18,780 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/05f4f884417c4c018c78ca8beef0dd47 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/05f4f884417c4c018c78ca8beef0dd47 
2024-12-10T14:27:18,780 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/ce81196f2f414b9cab501d4a4ad140fb to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/ce81196f2f414b9cab501d4a4ad140fb 2024-12-10T14:27:18,780 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/a4bee8038cdd47bcb94f1147a8d8a2bb to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/A/a4bee8038cdd47bcb94f1147a8d8a2bb 2024-12-10T14:27:18,783 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/206053bf91bb4f33b50c282fb706b44c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/206053bf91bb4f33b50c282fb706b44c 2024-12-10T14:27:18,783 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/e8370c0984e542b2a33fbcbdebb6ecba to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/e8370c0984e542b2a33fbcbdebb6ecba 2024-12-10T14:27:18,783 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/60a5dbb09e594c8ba4c12c76bebb5477 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/60a5dbb09e594c8ba4c12c76bebb5477 2024-12-10T14:27:18,783 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/57b7db9ec95c4bc68c39ae644c8d7bb5 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/B/57b7db9ec95c4bc68c39ae644c8d7bb5 2024-12-10T14:27:18,786 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/a8259dc2907b42a6b7f5f6c83f1a66dd to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/a8259dc2907b42a6b7f5f6c83f1a66dd 2024-12-10T14:27:18,786 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/a96998f0b70d4115857ee65ce2b75ae0 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/a96998f0b70d4115857ee65ce2b75ae0 2024-12-10T14:27:18,786 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/0eade79e517a4c84a336855bef1d0d82 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/0eade79e517a4c84a336855bef1d0d82 2024-12-10T14:27:18,786 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/03b1ab02757047be92c57bf6dcc390af to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/C/03b1ab02757047be92c57bf6dcc390af 2024-12-10T14:27:18,788 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/recovered.edits/284.seqid to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2/recovered.edits/284.seqid 2024-12-10T14:27:18,789 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,789 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T14:27:18,789 DEBUG [PEWorker-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T14:27:18,790 DEBUG [PEWorker-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-10T14:27:18,794 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412100abaa28ddedc436fa05dd7181c4f44f7_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412100abaa28ddedc436fa05dd7181c4f44f7_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,794 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101ca2324d935f4b619d432d6617b38149_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101ca2324d935f4b619d432d6617b38149_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,794 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121077a662bd7746469a96ab98df85018e79_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121077a662bd7746469a96ab98df85018e79_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,794 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102531711c16a748179842df47618ca960_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102531711c16a748179842df47618ca960_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,794 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101d6eb64cf53f45a6a56aca7cfafe6975_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101d6eb64cf53f45a6a56aca7cfafe6975_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,795 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121058b8cd97da5a4538ad34173ff45715a3_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121058b8cd97da5a4538ad34173ff45715a3_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,795 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121067f71653ed904d7b8d371c5e5455a680_ff0e5baacca9aaf73756a7fad4bfbee2 to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121067f71653ed904d7b8d371c5e5455a680_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,795 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412107582e020d79f441e85d7505d65c14d28_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412107582e020d79f441e85d7505d65c14d28_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,796 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c87e7829bfb64159b37dfc69c2cbdf8d_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c87e7829bfb64159b37dfc69c2cbdf8d_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,796 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108e01d9f9b71a40fa922997da2ca4679d_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108e01d9f9b71a40fa922997da2ca4679d_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,796 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121085700cb6f5d14bc6afb4eb4ba6566d19_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121085700cb6f5d14bc6afb4eb4ba6566d19_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,796 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fda79c5cb28049fab091db77b931b0ca_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fda79c5cb28049fab091db77b931b0ca_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,796 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108592a459df1c4e4e8f3dceabbe315e61_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108592a459df1c4e4e8f3dceabbe315e61_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,796 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ed8c3d83c76545c1b6e027b0b49caba0_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ed8c3d83c76545c1b6e027b0b49caba0_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,796 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210b9dfc2aed6514a069ed507127e06175f_ff0e5baacca9aaf73756a7fad4bfbee2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210b9dfc2aed6514a069ed507127e06175f_ff0e5baacca9aaf73756a7fad4bfbee2 2024-12-10T14:27:18,797 DEBUG [PEWorker-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T14:27:18,798 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=122, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:18,799 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T14:27:18,801 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-10T14:27:18,802 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=122, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:18,802 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
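The archive-and-delete sequence above is the master-side DeleteTableProcedure at work. On the client side the same flow is normally started with a plain Admin call; a minimal sketch against the standard HBase 2.x client API (the connection setup, class name and main-method wrapper are illustrative, not taken from this log):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableExample {
  public static void main(String[] args) throws Exception {
    // Connection settings come from the local hbase-site.xml; nothing here is read from the log.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        admin.disableTable(table);  // a table must be disabled before it can be deleted
        admin.deleteTable(table);   // submits the DeleteTableProcedure that archives the HFiles
      }
    }
  }
}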
2024-12-10T14:27:18,802 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733840838802"}]},"ts":"9223372036854775807"} 2024-12-10T14:27:18,803 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T14:27:18,803 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => ff0e5baacca9aaf73756a7fad4bfbee2, NAME => 'TestAcidGuarantees,,1733840810756.ff0e5baacca9aaf73756a7fad4bfbee2.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T14:27:18,803 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-10T14:27:18,803 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733840838803"}]},"ts":"9223372036854775807"} 2024-12-10T14:27:18,804 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T14:27:18,806 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=122, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:18,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 33 msec 2024-12-10T14:27:18,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-10T14:27:18,875 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-12-10T14:27:18,885 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=244 (was 244), OpenFileDescriptor=453 (was 449) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=284 (was 242) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2294 (was 2344) 2024-12-10T14:27:18,893 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=244, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=284, ProcessCount=11, AvailableMemoryMB=2294 2024-12-10T14:27:18,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
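The TableDescriptorChecker warning above fires because the requested table descriptor carries a very small MEMSTORE_FLUSHSIZE (131072 bytes). A minimal sketch of how such a per-table flush size is set, assuming the standard TableDescriptorBuilder API; the 131072-byte value matches the warning, the rest of the descriptor is illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallFlushSizeDescriptor {
  // Builds a descriptor whose 128 KB per-table flush size would trigger the warning above.
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setMemStoreFlushSize(128 * 1024)                        // 131072 bytes
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))  // family name is illustrative
        .build();
  }
}

Raising this value, or the cluster-wide hbase.hregion.memstore.flush.size, avoids the very frequent flushing the checker warns about.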
2024-12-10T14:27:18,895 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:27:18,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=123, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:18,896 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:27:18,896 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:18,896 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 123 2024-12-10T14:27:18,897 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:27:18,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-12-10T14:27:18,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742244_1420 (size=963) 2024-12-10T14:27:18,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-12-10T14:27:19,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-12-10T14:27:19,303 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da 2024-12-10T14:27:19,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742245_1421 (size=53) 2024-12-10T14:27:19,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-12-10T14:27:19,708 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:27:19,708 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing d2de72cdb20a8dd601845b8e001f941b, disabling compactions & flushes 2024-12-10T14:27:19,708 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:19,708 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:19,708 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. after waiting 0 ms 2024-12-10T14:27:19,708 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:19,708 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
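The create request above asks for three column families (A, B, C) with VERSIONS => '1' and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'. A minimal sketch of building an equivalent descriptor with the standard client API (class name, loop and connection setup are illustrative, not taken from the test code):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder builder =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // table-level metadata seen in the descriptor above
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        builder.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)           // VERSIONS => '1'
                .build());
      }
      admin.createTable(builder.build());    // stored as a CreateTableProcedure (pid=123 in this run)
    }
  }
}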
2024-12-10T14:27:19,708 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:19,709 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:27:19,709 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733840839709"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733840839709"}]},"ts":"1733840839709"} 2024-12-10T14:27:19,710 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T14:27:19,710 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:27:19,710 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840839710"}]},"ts":"1733840839710"} 2024-12-10T14:27:19,711 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T14:27:19,716 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2de72cdb20a8dd601845b8e001f941b, ASSIGN}] 2024-12-10T14:27:19,716 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2de72cdb20a8dd601845b8e001f941b, ASSIGN 2024-12-10T14:27:19,717 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2de72cdb20a8dd601845b8e001f941b, ASSIGN; state=OFFLINE, location=db1d50717577,46699,1733840717757; forceNewPlan=false, retain=false 2024-12-10T14:27:19,867 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=d2de72cdb20a8dd601845b8e001f941b, regionState=OPENING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:27:19,868 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; OpenRegionProcedure d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:27:19,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-12-10T14:27:20,019 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:20,022 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
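While the CreateTableProcedure adds the region to hbase:meta and assigns it, the client keeps polling the master ("Checking to see if procedure is done pid=123") until the table future completes. A rough client-side analogue of that wait, assuming the standard Admin API; the helper name, timeout and sleep interval are illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class WaitForTable {
  // Polls until the master reports all regions of the table as assigned and available.
  public static void await(Admin admin, String name, long timeoutMs) throws Exception {
    TableName table = TableName.valueOf(name);
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!admin.isTableAvailable(table)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("Timed out waiting for " + table);
      }
      Thread.sleep(100);  // mirrors the periodic "is procedure done" polling in the log
    }
  }
}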
2024-12-10T14:27:20,022 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(7285): Opening region: {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:27:20,022 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:20,022 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:27:20,022 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(7327): checking encryption for d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:20,022 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(7330): checking classloading for d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:20,023 INFO [StoreOpener-d2de72cdb20a8dd601845b8e001f941b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:20,024 INFO [StoreOpener-d2de72cdb20a8dd601845b8e001f941b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:27:20,025 INFO [StoreOpener-d2de72cdb20a8dd601845b8e001f941b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2de72cdb20a8dd601845b8e001f941b columnFamilyName A 2024-12-10T14:27:20,025 DEBUG [StoreOpener-d2de72cdb20a8dd601845b8e001f941b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:20,025 INFO [StoreOpener-d2de72cdb20a8dd601845b8e001f941b-1 {}] regionserver.HStore(327): Store=d2de72cdb20a8dd601845b8e001f941b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:27:20,025 INFO [StoreOpener-d2de72cdb20a8dd601845b8e001f941b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:20,026 INFO [StoreOpener-d2de72cdb20a8dd601845b8e001f941b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:27:20,026 INFO [StoreOpener-d2de72cdb20a8dd601845b8e001f941b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2de72cdb20a8dd601845b8e001f941b columnFamilyName B 2024-12-10T14:27:20,026 DEBUG [StoreOpener-d2de72cdb20a8dd601845b8e001f941b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:20,027 INFO [StoreOpener-d2de72cdb20a8dd601845b8e001f941b-1 {}] regionserver.HStore(327): Store=d2de72cdb20a8dd601845b8e001f941b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:27:20,027 INFO [StoreOpener-d2de72cdb20a8dd601845b8e001f941b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:20,028 INFO [StoreOpener-d2de72cdb20a8dd601845b8e001f941b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:27:20,028 INFO [StoreOpener-d2de72cdb20a8dd601845b8e001f941b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2de72cdb20a8dd601845b8e001f941b columnFamilyName C 2024-12-10T14:27:20,028 DEBUG [StoreOpener-d2de72cdb20a8dd601845b8e001f941b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:20,028 INFO [StoreOpener-d2de72cdb20a8dd601845b8e001f941b-1 {}] regionserver.HStore(327): Store=d2de72cdb20a8dd601845b8e001f941b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:27:20,028 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:20,029 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:20,029 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:20,030 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T14:27:20,031 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1085): writing seq id for d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:20,032 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:27:20,032 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1102): Opened d2de72cdb20a8dd601845b8e001f941b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60870529, jitterRate=-0.09295843541622162}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T14:27:20,033 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1001): Region open journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:20,033 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., pid=125, masterSystemTime=1733840840019 2024-12-10T14:27:20,034 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:20,035 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
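Each of the three stores opened above is a CompactingMemStore with compactor=ADAPTIVE, which this run requests through the table-level 'hbase.hregion.compacting.memstore.type' attribute. The same behaviour can also be requested per column family; a minimal sketch assuming the standard ColumnFamilyDescriptorBuilder API (class name is an illustrative helper, not from the test):

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveFamily {
  // Per-family alternative to the table-level compacting-memstore attribute.
  public static ColumnFamilyDescriptor build(String family) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)  // CompactingMemStore with ADAPTIVE compactor
        .setMaxVersions(1)
        .build();
  }
}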
2024-12-10T14:27:20,035 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=d2de72cdb20a8dd601845b8e001f941b, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:27:20,037 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-10T14:27:20,037 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; OpenRegionProcedure d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 in 168 msec 2024-12-10T14:27:20,038 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=124, resume processing ppid=123 2024-12-10T14:27:20,038 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, ppid=123, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2de72cdb20a8dd601845b8e001f941b, ASSIGN in 322 msec 2024-12-10T14:27:20,038 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:27:20,038 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840840038"}]},"ts":"1733840840038"} 2024-12-10T14:27:20,039 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T14:27:20,042 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:27:20,043 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1470 sec 2024-12-10T14:27:21,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-12-10T14:27:21,000 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 123 completed 2024-12-10T14:27:21,001 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2f142b04 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61d38088 2024-12-10T14:27:21,004 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d0ab200, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:21,006 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:21,007 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52256, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:21,007 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T14:27:21,008 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47876, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T14:27:21,010 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0de9f076 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7043f683 2024-12-10T14:27:21,012 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5871c039, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:21,013 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4414259d to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b0c2472 2024-12-10T14:27:21,015 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7daa5922, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:21,016 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ed69825 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34b30c39 2024-12-10T14:27:21,020 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b7f20c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:21,020 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11193a0c to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d672ed2 2024-12-10T14:27:21,023 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f7c40ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:21,023 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7861b162 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cf40102 2024-12-10T14:27:21,025 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41b0e7b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:21,026 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x496fe03f to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@f2423f3 2024-12-10T14:27:21,028 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dd48863, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:21,029 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3652e74d to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@184771cf 2024-12-10T14:27:21,033 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51196534, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:21,033 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2405c04e to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@76f0408 2024-12-10T14:27:21,036 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc5e114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:21,036 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x73d92042 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@c692575 2024-12-10T14:27:21,039 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e96b8ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:21,039 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x593af048 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1cbd2497 2024-12-10T14:27:21,042 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17e5a47d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:21,045 DEBUG [hconnection-0x60012b37-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:21,046 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52260, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:21,049 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:27:21,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-12-10T14:27:21,051 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:27:21,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T14:27:21,051 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:27:21,051 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:27:21,052 DEBUG [hconnection-0x36a01d02-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:21,053 DEBUG [hconnection-0x7d496b83-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:21,053 DEBUG [hconnection-0x532c78fe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:21,053 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52264, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:21,053 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52270, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:21,054 DEBUG [hconnection-0x2631425b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:21,054 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52274, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:21,054 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52290, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:21,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:21,056 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T14:27:21,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:21,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:21,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:21,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:21,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:21,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
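The flush above is driven by a master-side FlushTableProcedure (pid=126) requested for the whole table, after which MemStoreFlusher writes each of the three stores (A, B, C) to disk. A minimal sketch of issuing such a table-wide flush through the Admin API (connection setup and class name are illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every memstore of the table's regions.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}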
2024-12-10T14:27:21,060 DEBUG [hconnection-0x7cad2f44-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:21,060 DEBUG [hconnection-0x23b4d05-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:21,061 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52300, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:21,062 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52298, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:21,065 DEBUG [hconnection-0x36ad98c4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:21,066 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52302, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:21,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840901081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840901081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840901081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,082 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/81429b6c13a64ac5ba870ffde14384ac 
is 50, key is test_row_0/A:col10/1733840841053/Put/seqid=0 2024-12-10T14:27:21,083 DEBUG [hconnection-0x56e3b8fa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:21,083 DEBUG [hconnection-0x7572ca04-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:21,084 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52312, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:21,084 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52316, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:21,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840901085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840901087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742246_1422 (size=12001) 2024-12-10T14:27:21,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T14:27:21,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840901182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840901182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840901183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840901186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840901188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,202 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:21,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-10T14:27:21,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:21,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:21,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:21,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:21,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:21,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:21,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T14:27:21,355 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:21,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-10T14:27:21,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:21,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:21,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:21,355 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:21,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:21,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:21,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840901385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840901385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840901387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840901389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840901389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,493 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/81429b6c13a64ac5ba870ffde14384ac 2024-12-10T14:27:21,507 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:21,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-10T14:27:21,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:21,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:21,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:21,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:21,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:21,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:21,519 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/2afc1c045f854314b324b2280e8f4e23 is 50, key is test_row_0/B:col10/1733840841053/Put/seqid=0 2024-12-10T14:27:21,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742247_1423 (size=12001) 2024-12-10T14:27:21,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T14:27:21,659 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:21,659 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-10T14:27:21,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:21,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:21,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:21,660 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:21,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:21,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:21,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840901687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840901688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840901691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840901693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:21,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840901694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:21,811 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:21,812 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-10T14:27:21,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:21,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:21,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:21,812 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:21,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:21,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:21,923 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/2afc1c045f854314b324b2280e8f4e23 2024-12-10T14:27:21,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/b6d9651cc3f14884887e252429cbb014 is 50, key is test_row_0/C:col10/1733840841053/Put/seqid=0 2024-12-10T14:27:21,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742248_1424 (size=12001) 2024-12-10T14:27:21,964 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:21,965 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-10T14:27:21,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:21,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:21,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:21,965 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:21,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:21,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:22,117 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:22,117 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-10T14:27:22,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:22,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:22,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:22,118 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:22,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:22,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:22,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T14:27:22,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:22,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840902192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:22,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:22,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840902193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:22,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:22,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840902195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:22,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:22,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840902196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:22,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:22,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840902197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:22,270 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:22,270 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-10T14:27:22,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:22,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:22,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:22,270 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:22,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:22,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:22,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/b6d9651cc3f14884887e252429cbb014 2024-12-10T14:27:22,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/81429b6c13a64ac5ba870ffde14384ac as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/81429b6c13a64ac5ba870ffde14384ac 2024-12-10T14:27:22,359 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/81429b6c13a64ac5ba870ffde14384ac, entries=150, sequenceid=13, filesize=11.7 K 2024-12-10T14:27:22,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/2afc1c045f854314b324b2280e8f4e23 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/2afc1c045f854314b324b2280e8f4e23 2024-12-10T14:27:22,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/2afc1c045f854314b324b2280e8f4e23, entries=150, sequenceid=13, filesize=11.7 K 2024-12-10T14:27:22,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/b6d9651cc3f14884887e252429cbb014 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/b6d9651cc3f14884887e252429cbb014 2024-12-10T14:27:22,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/b6d9651cc3f14884887e252429cbb014, entries=150, sequenceid=13, filesize=11.7 K 2024-12-10T14:27:22,367 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for d2de72cdb20a8dd601845b8e001f941b in 1310ms, sequenceid=13, compaction requested=false 2024-12-10T14:27:22,367 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-10T14:27:22,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:22,422 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:22,423 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-10T14:27:22,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:22,423 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T14:27:22,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:22,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:22,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:22,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:22,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:22,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:22,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/da485c7f31614619a535a6a39a629762 is 50, key is test_row_0/A:col10/1733840841080/Put/seqid=0 2024-12-10T14:27:22,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742249_1425 (size=12001) 2024-12-10T14:27:22,431 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/da485c7f31614619a535a6a39a629762 2024-12-10T14:27:22,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/1688992b2bce403ebc6fc616866e114f is 50, key is test_row_0/B:col10/1733840841080/Put/seqid=0 2024-12-10T14:27:22,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:41075 is added to blk_1073742250_1426 (size=12001) 2024-12-10T14:27:22,842 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/1688992b2bce403ebc6fc616866e114f 2024-12-10T14:27:22,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/26e9c1b60cd8424a85ff2223f72382f2 is 50, key is test_row_0/C:col10/1733840841080/Put/seqid=0 2024-12-10T14:27:22,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742251_1427 (size=12001) 2024-12-10T14:27:23,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T14:27:23,196 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T14:27:23,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:23,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:23,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840903208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:23,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840903209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:23,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840903210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:23,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840903211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:23,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840903212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:23,254 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/26e9c1b60cd8424a85ff2223f72382f2 2024-12-10T14:27:23,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/da485c7f31614619a535a6a39a629762 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/da485c7f31614619a535a6a39a629762 2024-12-10T14:27:23,261 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/da485c7f31614619a535a6a39a629762, entries=150, sequenceid=39, filesize=11.7 K 2024-12-10T14:27:23,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/1688992b2bce403ebc6fc616866e114f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/1688992b2bce403ebc6fc616866e114f 2024-12-10T14:27:23,265 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/1688992b2bce403ebc6fc616866e114f, entries=150, sequenceid=39, filesize=11.7 K 2024-12-10T14:27:23,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/26e9c1b60cd8424a85ff2223f72382f2 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/26e9c1b60cd8424a85ff2223f72382f2 2024-12-10T14:27:23,269 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/26e9c1b60cd8424a85ff2223f72382f2, entries=150, sequenceid=39, filesize=11.7 K 2024-12-10T14:27:23,270 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for d2de72cdb20a8dd601845b8e001f941b in 847ms, sequenceid=39, compaction requested=false 2024-12-10T14:27:23,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:23,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:23,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-12-10T14:27:23,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-12-10T14:27:23,273 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-10T14:27:23,273 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2200 sec 2024-12-10T14:27:23,274 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 2.2250 sec 2024-12-10T14:27:23,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:23,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T14:27:23,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:23,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:23,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:23,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:23,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:23,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:23,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/cac19e98b4ae4b84a3cc1396e93666f0 is 50, key is test_row_0/A:col10/1733840843208/Put/seqid=0 2024-12-10T14:27:23,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742252_1428 (size=14341) 2024-12-10T14:27:23,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840903351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:23,356 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840903352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:23,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840903353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:23,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840903457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:23,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840903457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:23,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840903458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:23,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840903662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:23,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840903662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:23,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840903662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:23,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/cac19e98b4ae4b84a3cc1396e93666f0 2024-12-10T14:27:23,737 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/57a0883eca5c4ea683736cfe3c000713 is 50, key is test_row_0/B:col10/1733840843208/Put/seqid=0 2024-12-10T14:27:23,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742253_1429 (size=12001) 2024-12-10T14:27:23,741 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/57a0883eca5c4ea683736cfe3c000713 2024-12-10T14:27:23,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/35f8924a06464ae288ec6361ca84a5f5 is 50, key is test_row_0/C:col10/1733840843208/Put/seqid=0 2024-12-10T14:27:23,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742254_1430 (size=12001) 2024-12-10T14:27:23,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840903966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:23,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840903967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:23,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:23,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840903968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:24,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/35f8924a06464ae288ec6361ca84a5f5 2024-12-10T14:27:24,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/cac19e98b4ae4b84a3cc1396e93666f0 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/cac19e98b4ae4b84a3cc1396e93666f0 2024-12-10T14:27:24,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/cac19e98b4ae4b84a3cc1396e93666f0, entries=200, sequenceid=52, filesize=14.0 K 2024-12-10T14:27:24,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/57a0883eca5c4ea683736cfe3c000713 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/57a0883eca5c4ea683736cfe3c000713 2024-12-10T14:27:24,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/57a0883eca5c4ea683736cfe3c000713, entries=150, sequenceid=52, filesize=11.7 K 2024-12-10T14:27:24,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/35f8924a06464ae288ec6361ca84a5f5 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/35f8924a06464ae288ec6361ca84a5f5 2024-12-10T14:27:24,166 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/35f8924a06464ae288ec6361ca84a5f5, entries=150, sequenceid=52, filesize=11.7 K 2024-12-10T14:27:24,166 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d2de72cdb20a8dd601845b8e001f941b in 845ms, sequenceid=52, compaction requested=true 2024-12-10T14:27:24,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:24,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:27:24,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:24,167 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:24,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:27:24,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:24,167 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:24,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:27:24,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:24,167 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:24,167 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:24,167 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/A is initiating minor compaction (all files) 2024-12-10T14:27:24,168 DEBUG 
[RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/B is initiating minor compaction (all files) 2024-12-10T14:27:24,168 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/A in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:24,168 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/B in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:24,168 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/81429b6c13a64ac5ba870ffde14384ac, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/da485c7f31614619a535a6a39a629762, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/cac19e98b4ae4b84a3cc1396e93666f0] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=37.4 K 2024-12-10T14:27:24,168 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/2afc1c045f854314b324b2280e8f4e23, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/1688992b2bce403ebc6fc616866e114f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/57a0883eca5c4ea683736cfe3c000713] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=35.2 K 2024-12-10T14:27:24,168 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 81429b6c13a64ac5ba870ffde14384ac, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733840841053 2024-12-10T14:27:24,168 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 2afc1c045f854314b324b2280e8f4e23, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733840841053 2024-12-10T14:27:24,168 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting da485c7f31614619a535a6a39a629762, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733840841079 2024-12-10T14:27:24,168 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 1688992b2bce403ebc6fc616866e114f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733840841079 2024-12-10T14:27:24,169 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting cac19e98b4ae4b84a3cc1396e93666f0, 
keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733840843208 2024-12-10T14:27:24,169 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 57a0883eca5c4ea683736cfe3c000713, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733840843208 2024-12-10T14:27:24,175 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#B#compaction#354 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:24,175 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/57c5b07a81fd485eaf9b35d88af2191f is 50, key is test_row_0/B:col10/1733840843208/Put/seqid=0 2024-12-10T14:27:24,176 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#A#compaction#355 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:24,176 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/6c60c3ef90fe46dda39c4734d4fe8130 is 50, key is test_row_0/A:col10/1733840843208/Put/seqid=0 2024-12-10T14:27:24,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742255_1431 (size=12104) 2024-12-10T14:27:24,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742256_1432 (size=12104) 2024-12-10T14:27:24,185 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/57c5b07a81fd485eaf9b35d88af2191f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/57c5b07a81fd485eaf9b35d88af2191f 2024-12-10T14:27:24,186 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/6c60c3ef90fe46dda39c4734d4fe8130 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/6c60c3ef90fe46dda39c4734d4fe8130 2024-12-10T14:27:24,189 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/B of d2de72cdb20a8dd601845b8e001f941b into 57c5b07a81fd485eaf9b35d88af2191f(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:27:24,189 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:24,189 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/B, priority=13, startTime=1733840844167; duration=0sec 2024-12-10T14:27:24,189 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:24,189 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:B 2024-12-10T14:27:24,189 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:24,190 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:24,190 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/A of d2de72cdb20a8dd601845b8e001f941b into 6c60c3ef90fe46dda39c4734d4fe8130(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:27:24,190 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/C is initiating minor compaction (all files) 2024-12-10T14:27:24,190 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:24,190 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/A, priority=13, startTime=1733840844166; duration=0sec 2024-12-10T14:27:24,190 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/C in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:24,191 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:24,191 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:A 2024-12-10T14:27:24,191 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/b6d9651cc3f14884887e252429cbb014, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/26e9c1b60cd8424a85ff2223f72382f2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/35f8924a06464ae288ec6361ca84a5f5] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=35.2 K 2024-12-10T14:27:24,191 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting b6d9651cc3f14884887e252429cbb014, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733840841053 2024-12-10T14:27:24,191 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 26e9c1b60cd8424a85ff2223f72382f2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733840841079 2024-12-10T14:27:24,191 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 35f8924a06464ae288ec6361ca84a5f5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733840843208 2024-12-10T14:27:24,197 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#C#compaction#356 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:24,198 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/ca188d4ca1784193b86e26de27c29bad is 50, key is test_row_0/C:col10/1733840843208/Put/seqid=0 2024-12-10T14:27:24,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742257_1433 (size=12104) 2024-12-10T14:27:24,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:24,473 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T14:27:24,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:24,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:24,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:24,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:24,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:24,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:24,478 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/cfa02e1d3f224afdb3fe6dcac7d786f0 is 50, key is test_row_0/A:col10/1733840844472/Put/seqid=0 2024-12-10T14:27:24,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742258_1434 (size=12001) 2024-12-10T14:27:24,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:24,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840904483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:24,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:24,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840904485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:24,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:24,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840904489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:24,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840904591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:24,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:24,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840904591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840904591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:24,610 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/ca188d4ca1784193b86e26de27c29bad as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/ca188d4ca1784193b86e26de27c29bad 2024-12-10T14:27:24,614 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/C of d2de72cdb20a8dd601845b8e001f941b into ca188d4ca1784193b86e26de27c29bad(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:27:24,614 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:24,614 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/C, priority=13, startTime=1733840844167; duration=0sec 2024-12-10T14:27:24,614 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:24,614 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:C 2024-12-10T14:27:24,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:24,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840904795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:24,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:24,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840904795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:24,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:24,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840904796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:24,882 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/cfa02e1d3f224afdb3fe6dcac7d786f0 2024-12-10T14:27:24,889 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/6400d11e3fa74cf7ae892c3189a20ad6 is 50, key is test_row_0/B:col10/1733840844472/Put/seqid=0 2024-12-10T14:27:24,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742259_1435 (size=12001) 2024-12-10T14:27:25,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:25,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840905099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:25,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:25,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840905099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:25,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:25,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840905101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:25,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T14:27:25,155 INFO [Thread-1899 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-10T14:27:25,156 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:27:25,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-12-10T14:27:25,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T14:27:25,158 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:27:25,158 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:27:25,158 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:27:25,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:25,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840905218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:25,220 DEBUG [Thread-1891 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4155 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., hostname=db1d50717577,46699,1733840717757, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T14:27:25,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:25,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840905222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:25,224 DEBUG [Thread-1897 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., hostname=db1d50717577,46699,1733840717757, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T14:27:25,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T14:27:25,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/6400d11e3fa74cf7ae892c3189a20ad6 2024-12-10T14:27:25,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/369e9de332a640c7803c9bc9ec31b255 is 50, key is test_row_0/C:col10/1733840844472/Put/seqid=0 2024-12-10T14:27:25,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742260_1436 (size=12001) 2024-12-10T14:27:25,309 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:25,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-10T14:27:25,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:25,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:25,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:25,310 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
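[Editor's sketch] The Thread-1891/Thread-1897 retry traces above come from client writer threads going through HTable.put and RpcRetryingCallerImpl. As a rough illustration only (connection details, retry values, and the value written are assumptions, not taken from the test), a standalone writer exercising the same path might look like:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // The "tries=6, retries=16" messages above reflect client retry settings
    // like these (illustrative values, not necessarily the test's).
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      try {
        // RpcRetryingCallerImpl retries RegionTooBusyException internally;
        // it only reaches this catch once the retry budget is exhausted.
        table.put(put);
      } catch (IOException e) {
        System.err.println("put gave up after client-side retries: " + e);
      }
    }
  }
}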
2024-12-10T14:27:25,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:25,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:25,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T14:27:25,462 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:25,462 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-10T14:27:25,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:25,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:25,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:25,463 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:25,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:25,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:25,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:25,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840905602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:25,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:25,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840905603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:25,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840905605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:25,615 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:25,615 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-10T14:27:25,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
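[Editor's sketch] The repeated "Over memstore limit=512.0 K" rejections come from HRegion.checkResources, which blocks writes once a region's memstore passes its blocking size (flush size times the block multiplier). A hedged sketch of the two knobs involved; the 128 K flush size is an assumption chosen so that, with the default multiplier of 4, the limit matches the 512 K in the log, and the test's real configuration may differ:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // per-region flush size (assumed tiny test value)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at multiplier x flush size
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 K * 4 = 512 K, the limit reported in the RegionTooBusyException records above.
    System.out.println("writes blocked above ~" + blockingLimit + " bytes");
  }
}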
2024-12-10T14:27:25,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:25,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:25,616 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:25,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:25,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
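[Editor's sketch] The FLUSH operations logged as procId 126 and 128 are client-requested table flushes; the pid=129 subprocedure keeps failing with "Unable to complete flush" because the region reports it is already flushing, so the master re-dispatches it. A minimal sketch of issuing such a flush, assuming a connection built as in the earlier sketch:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; on the master this shows up
      // as a FlushTableProcedure (as with pid=128 above) with FlushRegionProcedure children.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}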
2024-12-10T14:27:25,704 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/369e9de332a640c7803c9bc9ec31b255 2024-12-10T14:27:25,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/cfa02e1d3f224afdb3fe6dcac7d786f0 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/cfa02e1d3f224afdb3fe6dcac7d786f0 2024-12-10T14:27:25,712 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/cfa02e1d3f224afdb3fe6dcac7d786f0, entries=150, sequenceid=78, filesize=11.7 K 2024-12-10T14:27:25,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/6400d11e3fa74cf7ae892c3189a20ad6 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/6400d11e3fa74cf7ae892c3189a20ad6 2024-12-10T14:27:25,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/6400d11e3fa74cf7ae892c3189a20ad6, entries=150, sequenceid=78, filesize=11.7 K 2024-12-10T14:27:25,716 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/369e9de332a640c7803c9bc9ec31b255 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/369e9de332a640c7803c9bc9ec31b255 2024-12-10T14:27:25,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/369e9de332a640c7803c9bc9ec31b255, entries=150, sequenceid=78, filesize=11.7 K 2024-12-10T14:27:25,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d2de72cdb20a8dd601845b8e001f941b in 1247ms, sequenceid=78, compaction requested=false 2024-12-10T14:27:25,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:25,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T14:27:25,768 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:25,768 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-10T14:27:25,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:25,768 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T14:27:25,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:25,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:25,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:25,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:25,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:25,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:25,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/b5de61e68ff34e5c9ed9463a3d5e1b46 is 50, key is test_row_0/A:col10/1733840844482/Put/seqid=0 2024-12-10T14:27:25,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742261_1437 (size=12001) 2024-12-10T14:27:26,177 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/b5de61e68ff34e5c9ed9463a3d5e1b46 2024-12-10T14:27:26,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/c6319c9981074936a2c74bf22d901d8a is 50, key is test_row_0/B:col10/1733840844482/Put/seqid=0 2024-12-10T14:27:26,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:41075 is added to blk_1073742262_1438 (size=12001) 2024-12-10T14:27:26,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T14:27:26,605 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/c6319c9981074936a2c74bf22d901d8a 2024-12-10T14:27:26,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/6818f378f16b4a2c9805e1488ea06647 is 50, key is test_row_0/C:col10/1733840844482/Put/seqid=0 2024-12-10T14:27:26,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:26,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:26,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742263_1439 (size=12001) 2024-12-10T14:27:26,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:26,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840906639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:26,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:26,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840906643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:26,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:26,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840906649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:26,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:26,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840906751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:26,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:26,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840906755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:26,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:26,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840906755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:26,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:26,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840906955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:26,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:26,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:26,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840906959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:26,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840906959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:27,019 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/6818f378f16b4a2c9805e1488ea06647 2024-12-10T14:27:27,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/b5de61e68ff34e5c9ed9463a3d5e1b46 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/b5de61e68ff34e5c9ed9463a3d5e1b46 2024-12-10T14:27:27,026 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/b5de61e68ff34e5c9ed9463a3d5e1b46, entries=150, sequenceid=91, filesize=11.7 K 2024-12-10T14:27:27,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/c6319c9981074936a2c74bf22d901d8a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c6319c9981074936a2c74bf22d901d8a 2024-12-10T14:27:27,031 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c6319c9981074936a2c74bf22d901d8a, entries=150, sequenceid=91, filesize=11.7 K 2024-12-10T14:27:27,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/6818f378f16b4a2c9805e1488ea06647 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/6818f378f16b4a2c9805e1488ea06647 2024-12-10T14:27:27,035 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/6818f378f16b4a2c9805e1488ea06647, entries=150, sequenceid=91, filesize=11.7 K 2024-12-10T14:27:27,036 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d2de72cdb20a8dd601845b8e001f941b in 1268ms, sequenceid=91, compaction requested=true 2024-12-10T14:27:27,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:27,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:27,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-12-10T14:27:27,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-12-10T14:27:27,038 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-10T14:27:27,038 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8790 sec 2024-12-10T14:27:27,039 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.8820 sec 2024-12-10T14:27:27,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T14:27:27,261 INFO [Thread-1899 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-12-10T14:27:27,262 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:27:27,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-12-10T14:27:27,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:27,264 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T14:27:27,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T14:27:27,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:27,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:27,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:27,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:27,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:27,264 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:27:27,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:27,265 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:27:27,265 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:27:27,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/5b2d9f8482d94aa4b84ff5e1efc9b2f8 is 50, key is test_row_0/A:col10/1733840847262/Put/seqid=0 2024-12-10T14:27:27,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742264_1440 (size=14341) 2024-12-10T14:27:27,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:27,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840907279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:27,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:27,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840907282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:27,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:27,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840907285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:27,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T14:27:27,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:27,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840907385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:27,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:27,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840907390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:27,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:27,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840907391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:27,416 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:27,417 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T14:27:27,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:27,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:27,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:27,417 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:27,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:27,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:27,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T14:27:27,569 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:27,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T14:27:27,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:27,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:27,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:27,569 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:27,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:27,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:27,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:27,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840907591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:27,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:27,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840907595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:27,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:27,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840907596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:27,672 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/5b2d9f8482d94aa4b84ff5e1efc9b2f8 2024-12-10T14:27:27,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/67a2f59be0dd4fb0897bef888250eced is 50, key is test_row_0/B:col10/1733840847262/Put/seqid=0 2024-12-10T14:27:27,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742265_1441 (size=12001) 2024-12-10T14:27:27,721 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:27,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T14:27:27,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:27,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:27,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:27,722 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:27,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:27,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:27,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T14:27:27,873 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:27,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T14:27:27,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:27,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:27,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:27,874 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:27,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:27,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:27,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:27,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840907898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:27,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:27,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840907901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:27,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:27,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840907902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:28,026 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:28,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T14:27:28,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:28,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:28,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:28,027 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:28,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:28,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:28,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/67a2f59be0dd4fb0897bef888250eced 2024-12-10T14:27:28,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/05a46b7c30d34fa1b72a140261578b03 is 50, key is test_row_0/C:col10/1733840847262/Put/seqid=0 2024-12-10T14:27:28,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742266_1442 (size=12001) 2024-12-10T14:27:28,178 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:28,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T14:27:28,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:28,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:28,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:28,179 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:28,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:28,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:28,330 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:28,330 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T14:27:28,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:28,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:28,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:28,331 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:28,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:28,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:28,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T14:27:28,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:28,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840908404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:28,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:28,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840908407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:28,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:28,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840908408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:28,482 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:28,483 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T14:27:28,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:28,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:28,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:28,483 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:28,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:28,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:28,494 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/05a46b7c30d34fa1b72a140261578b03 2024-12-10T14:27:28,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/5b2d9f8482d94aa4b84ff5e1efc9b2f8 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/5b2d9f8482d94aa4b84ff5e1efc9b2f8 2024-12-10T14:27:28,501 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/5b2d9f8482d94aa4b84ff5e1efc9b2f8, entries=200, sequenceid=116, filesize=14.0 K 2024-12-10T14:27:28,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/67a2f59be0dd4fb0897bef888250eced as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/67a2f59be0dd4fb0897bef888250eced 2024-12-10T14:27:28,504 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/67a2f59be0dd4fb0897bef888250eced, entries=150, sequenceid=116, filesize=11.7 K 2024-12-10T14:27:28,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/05a46b7c30d34fa1b72a140261578b03 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/05a46b7c30d34fa1b72a140261578b03 2024-12-10T14:27:28,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/05a46b7c30d34fa1b72a140261578b03, entries=150, sequenceid=116, filesize=11.7 K 2024-12-10T14:27:28,508 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d2de72cdb20a8dd601845b8e001f941b in 1245ms, sequenceid=116, compaction requested=true 2024-12-10T14:27:28,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:28,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:27:28,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:28,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:27:28,509 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:27:28,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:28,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:27:28,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:28,509 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:27:28,510 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50447 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:27:28,510 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/A is initiating minor compaction (all files) 2024-12-10T14:27:28,510 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/A in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:28,510 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/6c60c3ef90fe46dda39c4734d4fe8130, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/cfa02e1d3f224afdb3fe6dcac7d786f0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/b5de61e68ff34e5c9ed9463a3d5e1b46, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/5b2d9f8482d94aa4b84ff5e1efc9b2f8] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=49.3 K 2024-12-10T14:27:28,510 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:27:28,510 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/B is initiating minor compaction (all files) 2024-12-10T14:27:28,510 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c60c3ef90fe46dda39c4734d4fe8130, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733840843208 2024-12-10T14:27:28,511 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/B in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:28,511 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/57c5b07a81fd485eaf9b35d88af2191f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/6400d11e3fa74cf7ae892c3189a20ad6, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c6319c9981074936a2c74bf22d901d8a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/67a2f59be0dd4fb0897bef888250eced] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=47.0 K 2024-12-10T14:27:28,511 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting cfa02e1d3f224afdb3fe6dcac7d786f0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733840843350 2024-12-10T14:27:28,511 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 57c5b07a81fd485eaf9b35d88af2191f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733840843208 2024-12-10T14:27:28,511 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5de61e68ff34e5c9ed9463a3d5e1b46, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733840844479 2024-12-10T14:27:28,511 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 6400d11e3fa74cf7ae892c3189a20ad6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733840843350 2024-12-10T14:27:28,512 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b2d9f8482d94aa4b84ff5e1efc9b2f8, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733840846631 2024-12-10T14:27:28,512 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting c6319c9981074936a2c74bf22d901d8a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733840844479 2024-12-10T14:27:28,512 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 67a2f59be0dd4fb0897bef888250eced, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733840846631 2024-12-10T14:27:28,519 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#A#compaction#366 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:28,520 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/1868f71fb2aa48b4b8bce5e838637926 is 50, key is test_row_0/A:col10/1733840847262/Put/seqid=0 2024-12-10T14:27:28,523 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#B#compaction#367 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:28,523 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/c6cb09cf0ffe4408b420394f9cd4be00 is 50, key is test_row_0/B:col10/1733840847262/Put/seqid=0 2024-12-10T14:27:28,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742267_1443 (size=12241) 2024-12-10T14:27:28,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742268_1444 (size=12241) 2024-12-10T14:27:28,635 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:28,635 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T14:27:28,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
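Aside: the FlushRegionCallable with pid=131 above is the region-server half of a table flush driven from the master (its parent FlushTableProcedure, pid=130, completes later in this log). The log does not show who requested the flush; for reference, the sketch below is the ordinary client-side call that produces this kind of procedure pair, assuming nothing beyond the table name visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlushSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask the master to flush every region of the table; on the region server this
            // surfaces as a FlushRegionCallable per region, as seen in the log above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}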
2024-12-10T14:27:28,636 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T14:27:28,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:28,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:28,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:28,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:28,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:28,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:28,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/a1625115f79c49f0b559f2df4cbf9508 is 50, key is test_row_0/A:col10/1733840847284/Put/seqid=0 2024-12-10T14:27:28,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742269_1445 (size=12001) 2024-12-10T14:27:28,931 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/1868f71fb2aa48b4b8bce5e838637926 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/1868f71fb2aa48b4b8bce5e838637926 2024-12-10T14:27:28,935 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/A of d2de72cdb20a8dd601845b8e001f941b into 1868f71fb2aa48b4b8bce5e838637926(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
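Aside: the Mutate RPCs this region is flushing here, and that start bouncing with RegionTooBusyException ("Over memstore limit=512.0 K") a few lines further down, are plain multi-family puts against row test_row_0 with families A, B and C. The sketch below is a minimal, hypothetical client-side version of such a write; the explicit catch only illustrates the exception type seen in the log, since the stock client normally retries RegionTooBusyException on its own under hbase.client.retries.number and hbase.client.pause.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiFamilyPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            byte[] value = Bytes.toBytes(12345L);
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // One row mutation spanning all three families; HBase applies it atomically
            // per row, which is the property the surrounding test exercises.
            for (String family : new String[] {"A", "B", "C"}) {
                put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
            }
            try {
                table.put(put);
            } catch (RegionTooBusyException e) {
                // Thrown server-side when the region's memstore is over its blocking limit
                // (512.0 K in this test run); if it ever escapes the client's built-in
                // retries, back off briefly and try again.
                Thread.sleep(100L);
                table.put(put);
            }
        }
    }
}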
2024-12-10T14:27:28,935 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:28,935 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/A, priority=12, startTime=1733840848509; duration=0sec 2024-12-10T14:27:28,935 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:28,935 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:A 2024-12-10T14:27:28,935 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:27:28,937 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:27:28,937 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/C is initiating minor compaction (all files) 2024-12-10T14:27:28,937 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/C in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:28,937 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/ca188d4ca1784193b86e26de27c29bad, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/369e9de332a640c7803c9bc9ec31b255, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/6818f378f16b4a2c9805e1488ea06647, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/05a46b7c30d34fa1b72a140261578b03] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=47.0 K 2024-12-10T14:27:28,937 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca188d4ca1784193b86e26de27c29bad, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733840843208 2024-12-10T14:27:28,937 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 369e9de332a640c7803c9bc9ec31b255, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733840843350 2024-12-10T14:27:28,938 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6818f378f16b4a2c9805e1488ea06647, keycount=150, bloomtype=ROW, size=11.7 K, 
encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733840844479 2024-12-10T14:27:28,938 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05a46b7c30d34fa1b72a140261578b03, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733840846631 2024-12-10T14:27:28,938 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/c6cb09cf0ffe4408b420394f9cd4be00 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c6cb09cf0ffe4408b420394f9cd4be00 2024-12-10T14:27:28,942 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/B of d2de72cdb20a8dd601845b8e001f941b into c6cb09cf0ffe4408b420394f9cd4be00(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:27:28,942 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:28,942 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/B, priority=12, startTime=1733840848509; duration=0sec 2024-12-10T14:27:28,942 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:28,942 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:B 2024-12-10T14:27:28,946 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#C#compaction#369 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:28,947 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/b8339c22770346adb72dd71650df8a77 is 50, key is test_row_0/C:col10/1733840847262/Put/seqid=0 2024-12-10T14:27:28,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742270_1446 (size=12241) 2024-12-10T14:27:29,044 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/a1625115f79c49f0b559f2df4cbf9508 2024-12-10T14:27:29,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/b56711b7517042de9037a03d242ade61 is 50, key is test_row_0/B:col10/1733840847284/Put/seqid=0 2024-12-10T14:27:29,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742271_1447 (size=12001) 2024-12-10T14:27:29,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:29,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:29,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:29,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840909289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:29,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:29,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840909290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:29,358 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/b8339c22770346adb72dd71650df8a77 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/b8339c22770346adb72dd71650df8a77 2024-12-10T14:27:29,362 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/C of d2de72cdb20a8dd601845b8e001f941b into b8339c22770346adb72dd71650df8a77(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:27:29,362 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:29,362 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/C, priority=12, startTime=1733840848509; duration=0sec 2024-12-10T14:27:29,362 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:29,362 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:C 2024-12-10T14:27:29,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T14:27:29,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:29,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:29,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840909394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:29,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840909395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:29,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:29,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840909411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:29,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:29,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840909416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:29,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:29,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840909421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:29,459 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/b56711b7517042de9037a03d242ade61 2024-12-10T14:27:29,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/d77c0ba1f66e4cbca185933598cecdf0 is 50, key is test_row_0/C:col10/1733840847284/Put/seqid=0 2024-12-10T14:27:29,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742272_1448 (size=12001) 2024-12-10T14:27:29,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:29,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840909599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:29,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:29,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840909599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:29,872 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/d77c0ba1f66e4cbca185933598cecdf0 2024-12-10T14:27:29,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/a1625115f79c49f0b559f2df4cbf9508 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a1625115f79c49f0b559f2df4cbf9508 2024-12-10T14:27:29,879 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a1625115f79c49f0b559f2df4cbf9508, entries=150, sequenceid=127, filesize=11.7 K 2024-12-10T14:27:29,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/b56711b7517042de9037a03d242ade61 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/b56711b7517042de9037a03d242ade61 2024-12-10T14:27:29,883 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/b56711b7517042de9037a03d242ade61, entries=150, sequenceid=127, filesize=11.7 K 2024-12-10T14:27:29,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/d77c0ba1f66e4cbca185933598cecdf0 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/d77c0ba1f66e4cbca185933598cecdf0 2024-12-10T14:27:29,887 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/d77c0ba1f66e4cbca185933598cecdf0, entries=150, sequenceid=127, filesize=11.7 K 2024-12-10T14:27:29,887 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d2de72cdb20a8dd601845b8e001f941b in 1252ms, sequenceid=127, compaction requested=false 2024-12-10T14:27:29,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:29,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:29,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-12-10T14:27:29,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-12-10T14:27:29,890 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-10T14:27:29,890 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6240 sec 2024-12-10T14:27:29,891 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 2.6280 sec 2024-12-10T14:27:29,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:29,908 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T14:27:29,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:29,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:29,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:29,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:29,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:29,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:29,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/130f6430b78944f99d67154dc34ddea3 is 50, key is test_row_0/A:col10/1733840849907/Put/seqid=0 2024-12-10T14:27:29,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742273_1449 (size=16931) 2024-12-10T14:27:29,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:29,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840909920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:29,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:29,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840909922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:30,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:30,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840910027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:30,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:30,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840910028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:30,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:30,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840910231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:30,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:30,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840910231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:30,317 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/130f6430b78944f99d67154dc34ddea3 2024-12-10T14:27:30,323 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/4272f0caddf44f9488716982ae81ee5c is 50, key is test_row_0/B:col10/1733840849907/Put/seqid=0 2024-12-10T14:27:30,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742274_1450 (size=12151) 2024-12-10T14:27:30,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:30,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840910536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:30,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:30,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840910538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:30,727 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/4272f0caddf44f9488716982ae81ee5c 2024-12-10T14:27:30,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/87b51d053c2e44c3b307b0bec421293e is 50, key is test_row_0/C:col10/1733840849907/Put/seqid=0 2024-12-10T14:27:30,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742275_1451 (size=12151) 2024-12-10T14:27:31,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:31,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840911041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:31,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:31,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840911043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:31,138 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/87b51d053c2e44c3b307b0bec421293e 2024-12-10T14:27:31,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/130f6430b78944f99d67154dc34ddea3 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/130f6430b78944f99d67154dc34ddea3 2024-12-10T14:27:31,145 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/130f6430b78944f99d67154dc34ddea3, entries=250, sequenceid=157, filesize=16.5 K 2024-12-10T14:27:31,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/4272f0caddf44f9488716982ae81ee5c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/4272f0caddf44f9488716982ae81ee5c 2024-12-10T14:27:31,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/4272f0caddf44f9488716982ae81ee5c, entries=150, sequenceid=157, filesize=11.9 K 2024-12-10T14:27:31,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/87b51d053c2e44c3b307b0bec421293e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/87b51d053c2e44c3b307b0bec421293e 2024-12-10T14:27:31,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/87b51d053c2e44c3b307b0bec421293e, entries=150, sequenceid=157, filesize=11.9 K 2024-12-10T14:27:31,153 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-10T14:27:31,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for d2de72cdb20a8dd601845b8e001f941b in 1245ms, sequenceid=157, compaction requested=true 2024-12-10T14:27:31,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:31,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:27:31,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:31,154 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:31,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:27:31,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:31,154 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:31,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:27:31,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:31,155 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41173 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:31,155 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:31,155 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/B is initiating minor compaction (all files) 2024-12-10T14:27:31,155 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/A is initiating minor compaction (all files) 2024-12-10T14:27:31,155 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/B in 
TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:31,155 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/A in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:31,155 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/1868f71fb2aa48b4b8bce5e838637926, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a1625115f79c49f0b559f2df4cbf9508, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/130f6430b78944f99d67154dc34ddea3] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=40.2 K 2024-12-10T14:27:31,155 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c6cb09cf0ffe4408b420394f9cd4be00, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/b56711b7517042de9037a03d242ade61, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/4272f0caddf44f9488716982ae81ee5c] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=35.5 K 2024-12-10T14:27:31,156 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1868f71fb2aa48b4b8bce5e838637926, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733840846631 2024-12-10T14:27:31,156 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting c6cb09cf0ffe4408b420394f9cd4be00, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733840846631 2024-12-10T14:27:31,156 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting b56711b7517042de9037a03d242ade61, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733840847276 2024-12-10T14:27:31,156 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1625115f79c49f0b559f2df4cbf9508, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733840847276 2024-12-10T14:27:31,157 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 130f6430b78944f99d67154dc34ddea3, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733840849279 2024-12-10T14:27:31,157 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 4272f0caddf44f9488716982ae81ee5c, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733840849279 2024-12-10T14:27:31,163 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#A#compaction#375 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:31,163 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/9ef3354185714fe692b56fdf10a5530c is 50, key is test_row_0/A:col10/1733840849907/Put/seqid=0 2024-12-10T14:27:31,170 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#B#compaction#376 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:31,171 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/a5a16851f7ce47bd8bf670dae33d9318 is 50, key is test_row_0/B:col10/1733840849907/Put/seqid=0 2024-12-10T14:27:31,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742276_1452 (size=12493) 2024-12-10T14:27:31,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742277_1453 (size=12493) 2024-12-10T14:27:31,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T14:27:31,368 INFO [Thread-1899 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-10T14:27:31,369 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:27:31,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-12-10T14:27:31,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-10T14:27:31,370 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:27:31,371 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:27:31,371 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:27:31,427 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:31,427 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T14:27:31,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:31,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:31,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:31,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:31,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:31,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:31,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/7920d1740a14471e8c59cec93b80370a is 50, key is test_row_0/A:col10/1733840849919/Put/seqid=0 2024-12-10T14:27:31,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742278_1454 (size=14541) 2024-12-10T14:27:31,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-10T14:27:31,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:31,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840911477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:31,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:31,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840911482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:31,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:31,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840911483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:31,522 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:31,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T14:27:31,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:31,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:31,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:31,523 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:31,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:31,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:31,579 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/9ef3354185714fe692b56fdf10a5530c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/9ef3354185714fe692b56fdf10a5530c 2024-12-10T14:27:31,579 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/a5a16851f7ce47bd8bf670dae33d9318 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/a5a16851f7ce47bd8bf670dae33d9318 2024-12-10T14:27:31,584 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/B of d2de72cdb20a8dd601845b8e001f941b into a5a16851f7ce47bd8bf670dae33d9318(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:27:31,584 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/A of d2de72cdb20a8dd601845b8e001f941b into 9ef3354185714fe692b56fdf10a5530c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:27:31,584 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:31,584 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:31,584 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/B, priority=13, startTime=1733840851154; duration=0sec 2024-12-10T14:27:31,584 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/A, priority=13, startTime=1733840851154; duration=0sec 2024-12-10T14:27:31,584 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:31,584 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:B 2024-12-10T14:27:31,585 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:31,585 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:A 2024-12-10T14:27:31,585 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:31,585 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:31,585 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/C is initiating minor compaction (all files) 2024-12-10T14:27:31,585 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/C in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:31,585 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/b8339c22770346adb72dd71650df8a77, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/d77c0ba1f66e4cbca185933598cecdf0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/87b51d053c2e44c3b307b0bec421293e] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=35.5 K 2024-12-10T14:27:31,586 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting b8339c22770346adb72dd71650df8a77, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733840846631 2024-12-10T14:27:31,586 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting d77c0ba1f66e4cbca185933598cecdf0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733840847276 2024-12-10T14:27:31,586 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 87b51d053c2e44c3b307b0bec421293e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733840849279 2024-12-10T14:27:31,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:31,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840911585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:31,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:31,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840911585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:31,593 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#C#compaction#378 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:31,593 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/743618e951d34d248bde437dd64ce2b2 is 50, key is test_row_0/C:col10/1733840849907/Put/seqid=0 2024-12-10T14:27:31,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:31,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840911592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:31,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742279_1455 (size=12493) 2024-12-10T14:27:31,600 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/743618e951d34d248bde437dd64ce2b2 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/743618e951d34d248bde437dd64ce2b2 2024-12-10T14:27:31,604 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/C of d2de72cdb20a8dd601845b8e001f941b into 743618e951d34d248bde437dd64ce2b2(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:27:31,604 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:31,604 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/C, priority=13, startTime=1733840851154; duration=0sec 2024-12-10T14:27:31,604 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:31,604 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:C 2024-12-10T14:27:31,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-10T14:27:31,674 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:31,674 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T14:27:31,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:31,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:31,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:31,675 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:31,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:31,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:31,793 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:31,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840911790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:31,793 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:31,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840911790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:31,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:31,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840911795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:31,827 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:31,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T14:27:31,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:31,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:31,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:31,827 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:31,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:31,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:31,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/7920d1740a14471e8c59cec93b80370a 2024-12-10T14:27:31,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/e7466b1264aa44979fbc782c4d54499c is 50, key is test_row_0/B:col10/1733840849919/Put/seqid=0 2024-12-10T14:27:31,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742280_1456 (size=12151) 2024-12-10T14:27:31,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-10T14:27:31,979 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:31,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T14:27:31,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:31,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:31,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:31,980 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:31,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:31,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:32,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:32,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840912045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:32,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:32,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840912053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:32,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:32,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840912095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:32,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:32,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840912096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:32,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:32,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840912098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:32,132 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:32,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T14:27:32,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:32,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:32,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:32,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:32,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:32,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:32,247 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/e7466b1264aa44979fbc782c4d54499c 2024-12-10T14:27:32,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/2d763ab7372146edab2ddda4dfee3ab7 is 50, key is test_row_0/C:col10/1733840849919/Put/seqid=0 2024-12-10T14:27:32,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742281_1457 (size=12151) 2024-12-10T14:27:32,257 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/2d763ab7372146edab2ddda4dfee3ab7 2024-12-10T14:27:32,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/7920d1740a14471e8c59cec93b80370a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/7920d1740a14471e8c59cec93b80370a 2024-12-10T14:27:32,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/7920d1740a14471e8c59cec93b80370a, entries=200, sequenceid=168, filesize=14.2 K 2024-12-10T14:27:32,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/e7466b1264aa44979fbc782c4d54499c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/e7466b1264aa44979fbc782c4d54499c 2024-12-10T14:27:32,267 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/e7466b1264aa44979fbc782c4d54499c, entries=150, sequenceid=168, filesize=11.9 K 2024-12-10T14:27:32,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/2d763ab7372146edab2ddda4dfee3ab7 as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/2d763ab7372146edab2ddda4dfee3ab7 2024-12-10T14:27:32,270 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/2d763ab7372146edab2ddda4dfee3ab7, entries=150, sequenceid=168, filesize=11.9 K 2024-12-10T14:27:32,271 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d2de72cdb20a8dd601845b8e001f941b in 844ms, sequenceid=168, compaction requested=false 2024-12-10T14:27:32,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:32,284 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:32,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T14:27:32,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:32,285 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T14:27:32,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:32,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:32,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:32,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:32,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:32,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:32,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/599bca0ce79a41e4b9c54c2e406c01db is 50, key is test_row_0/A:col10/1733840851481/Put/seqid=0 2024-12-10T14:27:32,293 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742282_1458 (size=12151) 2024-12-10T14:27:32,294 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/599bca0ce79a41e4b9c54c2e406c01db 2024-12-10T14:27:32,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/44c0595877ce4b70a61b3df7161fc747 is 50, key is test_row_0/B:col10/1733840851481/Put/seqid=0 2024-12-10T14:27:32,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742283_1459 (size=12151) 2024-12-10T14:27:32,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-10T14:27:32,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:32,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:32,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:32,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840912612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:32,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:32,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840912612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:32,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:32,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840912613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:32,712 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/44c0595877ce4b70a61b3df7161fc747 2024-12-10T14:27:32,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/ca70733a8f0d48c3a90e177f7b22292a is 50, key is test_row_0/C:col10/1733840851481/Put/seqid=0 2024-12-10T14:27:32,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742284_1460 (size=12151) 2024-12-10T14:27:32,722 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/ca70733a8f0d48c3a90e177f7b22292a 2024-12-10T14:27:32,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:32,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840912717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:32,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:32,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840912718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:32,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:32,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840912718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:32,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/599bca0ce79a41e4b9c54c2e406c01db as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/599bca0ce79a41e4b9c54c2e406c01db 2024-12-10T14:27:32,728 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/599bca0ce79a41e4b9c54c2e406c01db, entries=150, sequenceid=196, filesize=11.9 K 2024-12-10T14:27:32,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/44c0595877ce4b70a61b3df7161fc747 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/44c0595877ce4b70a61b3df7161fc747 2024-12-10T14:27:32,732 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/44c0595877ce4b70a61b3df7161fc747, entries=150, sequenceid=196, filesize=11.9 K 2024-12-10T14:27:32,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/ca70733a8f0d48c3a90e177f7b22292a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/ca70733a8f0d48c3a90e177f7b22292a 2024-12-10T14:27:32,735 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/ca70733a8f0d48c3a90e177f7b22292a, entries=150, sequenceid=196, filesize=11.9 K 2024-12-10T14:27:32,736 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d2de72cdb20a8dd601845b8e001f941b in 451ms, sequenceid=196, compaction requested=true 2024-12-10T14:27:32,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:32,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:32,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-12-10T14:27:32,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-12-10T14:27:32,738 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-10T14:27:32,738 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3660 sec 2024-12-10T14:27:32,739 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.3690 sec 2024-12-10T14:27:32,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:32,928 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T14:27:32,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:32,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:32,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:32,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:32,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:32,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:32,935 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/6a9570973b8e4e52a8b9f5db7b858dee is 50, key is test_row_0/A:col10/1733840852611/Put/seqid=0 2024-12-10T14:27:32,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742285_1461 (size=16931) 2024-12-10T14:27:32,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:32,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840912966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:32,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:32,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840912970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:32,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:32,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840912971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:33,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:33,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840913072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:33,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:33,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840913076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:33,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:33,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840913080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:33,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:33,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840913276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:33,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:33,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840913284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:33,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:33,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840913284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:33,339 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/6a9570973b8e4e52a8b9f5db7b858dee 2024-12-10T14:27:33,346 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/5bd077a30c594ad08ad03abd21df6c38 is 50, key is test_row_0/B:col10/1733840852611/Put/seqid=0 2024-12-10T14:27:33,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742286_1462 (size=12151) 2024-12-10T14:27:33,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-10T14:27:33,474 INFO [Thread-1899 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-12-10T14:27:33,475 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:27:33,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-10T14:27:33,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-10T14:27:33,477 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:27:33,477 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:27:33,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:27:33,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-10T14:27:33,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:33,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840913580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:33,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:33,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840913590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:33,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:33,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840913591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:33,629 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:33,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-10T14:27:33,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:33,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:33,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:33,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:33,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:33,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
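
Editor's note: the pid=134/pid=135 entries above are a master-driven table flush colliding with the memstore flush already in progress; the FlushRegionCallable keeps failing with "already flushing" and the master simply re-dispatches it. A minimal, hypothetical sketch of the client call that triggers this path (table name taken from the log; connection handling is illustrative, not part of this test run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Submits a FlushTableProcedure on the master (as with pid=134 above);
          // if a region is mid-flush, the per-region remote procedure is retried,
          // which is what the repeated pid=135 attempts in this log show.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
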
2024-12-10T14:27:33,752 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/5bd077a30c594ad08ad03abd21df6c38 2024-12-10T14:27:33,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/4a13a1edae824250a5cee897a2f8e767 is 50, key is test_row_0/C:col10/1733840852611/Put/seqid=0 2024-12-10T14:27:33,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742287_1463 (size=12151) 2024-12-10T14:27:33,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-10T14:27:33,781 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:33,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-10T14:27:33,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:33,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:33,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:33,782 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:33,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:33,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:33,934 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:33,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-10T14:27:33,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:33,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:33,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:33,935 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:33,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:33,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:34,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:34,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840914050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:34,055 DEBUG [Thread-1891 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4133 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., hostname=db1d50717577,46699,1733840717757, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T14:27:34,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:34,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840914070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:34,074 DEBUG [Thread-1897 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4154 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., hostname=db1d50717577,46699,1733840717757, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T14:27:34,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 
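The RegionTooBusyException traffic above is the client side of a blocked region: HRegion.checkResources rejects the Mutate while the region memstore is over its 512.0 K blocking limit, and the client's RpcRetryingCallerImpl backs off and retries ("tries=6, retries=16"). The sketch below shows a writer on the same path with the retry budget made explicit; the class name, pause value, and cell value are illustrative and not taken from this run.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriterSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 16); // matches retries=16 in the caller messages above
        conf.setLong("hbase.client.pause", 100);        // base backoff in ms (illustrative value)

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_2"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // RegionTooBusyException is retried inside the client caller with backoff;
          // put() only fails once the retry budget is exhausted.
          table.put(put);
        } catch (IOException e) {
          System.err.println("put failed after retries: " + e.getMessage());
        }
      }
    }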
2024-12-10T14:27:34,086 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:34,087 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-10T14:27:34,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:34,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:34,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:34,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:34,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
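The pid=135 failures above ("NOT flushing ... as already flushing", then "Unable to complete flush") are expected while a flush is already in progress; the master keeps the remote procedure alive and re-dispatches it until it succeeds. In this build a flush requested through Admin rides the same master procedure framework (the pid=134/135 entries above are such a procedure); a minimal sketch, class name illustrative:

    import java.io.IOException;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the cluster to flush the table's regions; in this run the per-region work
          // shows up as FlushRegionCallable procedures dispatched by the master (pid=135).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }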
2024-12-10T14:27:34,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:34,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:34,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840914087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:34,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:34,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840914096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:34,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:34,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840914099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:34,172 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/4a13a1edae824250a5cee897a2f8e767 2024-12-10T14:27:34,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/6a9570973b8e4e52a8b9f5db7b858dee as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/6a9570973b8e4e52a8b9f5db7b858dee 2024-12-10T14:27:34,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/6a9570973b8e4e52a8b9f5db7b858dee, entries=250, sequenceid=209, filesize=16.5 K 2024-12-10T14:27:34,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/5bd077a30c594ad08ad03abd21df6c38 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/5bd077a30c594ad08ad03abd21df6c38 2024-12-10T14:27:34,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/5bd077a30c594ad08ad03abd21df6c38, entries=150, sequenceid=209, filesize=11.9 K 2024-12-10T14:27:34,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/4a13a1edae824250a5cee897a2f8e767 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/4a13a1edae824250a5cee897a2f8e767 2024-12-10T14:27:34,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/4a13a1edae824250a5cee897a2f8e767, entries=150, sequenceid=209, filesize=11.9 K 2024-12-10T14:27:34,187 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d2de72cdb20a8dd601845b8e001f941b in 1259ms, sequenceid=209, compaction requested=true 2024-12-10T14:27:34,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:34,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:27:34,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:34,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:27:34,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:34,187 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:27:34,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:27:34,187 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:27:34,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:34,188 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 56116 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:27:34,188 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:27:34,188 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/A is initiating minor compaction (all files) 2024-12-10T14:27:34,188 DEBUG 
[RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/B is initiating minor compaction (all files) 2024-12-10T14:27:34,188 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/A in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:34,188 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/B in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:34,189 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/9ef3354185714fe692b56fdf10a5530c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/7920d1740a14471e8c59cec93b80370a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/599bca0ce79a41e4b9c54c2e406c01db, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/6a9570973b8e4e52a8b9f5db7b858dee] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=54.8 K 2024-12-10T14:27:34,189 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/a5a16851f7ce47bd8bf670dae33d9318, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/e7466b1264aa44979fbc782c4d54499c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/44c0595877ce4b70a61b3df7161fc747, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/5bd077a30c594ad08ad03abd21df6c38] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=47.8 K 2024-12-10T14:27:34,189 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ef3354185714fe692b56fdf10a5530c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733840849279 2024-12-10T14:27:34,189 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting a5a16851f7ce47bd8bf670dae33d9318, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733840849279 2024-12-10T14:27:34,189 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7920d1740a14471e8c59cec93b80370a, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733840849917 2024-12-10T14:27:34,189 DEBUG 
[RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting e7466b1264aa44979fbc782c4d54499c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733840849919 2024-12-10T14:27:34,189 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 599bca0ce79a41e4b9c54c2e406c01db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733840851473 2024-12-10T14:27:34,189 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 44c0595877ce4b70a61b3df7161fc747, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733840851473 2024-12-10T14:27:34,190 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a9570973b8e4e52a8b9f5db7b858dee, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733840852611 2024-12-10T14:27:34,190 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 5bd077a30c594ad08ad03abd21df6c38, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733840852611 2024-12-10T14:27:34,199 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#B#compaction#388 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:34,199 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#A#compaction#387 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:34,199 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/fede36cd60d74c6f91fedd65441327d8 is 50, key is test_row_0/B:col10/1733840852611/Put/seqid=0 2024-12-10T14:27:34,199 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/b86a60130e4f4e1a827814ae858539db is 50, key is test_row_0/A:col10/1733840852611/Put/seqid=0 2024-12-10T14:27:34,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742288_1464 (size=12629) 2024-12-10T14:27:34,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742289_1465 (size=12629) 2024-12-10T14:27:34,208 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/fede36cd60d74c6f91fedd65441327d8 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/fede36cd60d74c6f91fedd65441327d8 2024-12-10T14:27:34,211 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/B of d2de72cdb20a8dd601845b8e001f941b into fede36cd60d74c6f91fedd65441327d8(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
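The compaction selection above ("Exploring compaction algorithm has selected 4 files of size 56116 ... with 3 in ratio") keeps only candidate sets whose files are mutually "in ratio", i.e. no single file dwarfs the rest. Below is a simplified sketch of that test, using sizes reconstructed from the rounded figures for the A store (12.2 K, 14.2 K, 11.9 K, 16.5 K); the real policy also enforces min/max file counts, total-size limits, and off-peak ratios.

    // Simplified "in ratio" check in the spirit of the exploring compaction policy above.
    // The default hbase.hstore.compaction.ratio is 1.2; all other selection rules are omitted.
    public class CompactionRatioSketch {
      static boolean filesInRatio(long[] fileSizes, double ratio) {
        long total = 0;
        for (long size : fileSizes) {
          total += size;
        }
        for (long size : fileSizes) {
          // Reject the selection if any one file is larger than ratio times the rest combined.
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        long[] aStoreSizes = {12493, 14541, 12186, 16896}; // sums to 56116 bytes, as logged
        System.out.println(filesInRatio(aStoreSizes, 1.2)); // true: the 4-file selection is accepted
      }
    }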
2024-12-10T14:27:34,211 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:34,211 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/B, priority=12, startTime=1733840854187; duration=0sec 2024-12-10T14:27:34,212 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:34,212 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:B 2024-12-10T14:27:34,212 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:27:34,213 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:27:34,213 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/C is initiating minor compaction (all files) 2024-12-10T14:27:34,213 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/C in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:34,213 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/743618e951d34d248bde437dd64ce2b2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/2d763ab7372146edab2ddda4dfee3ab7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/ca70733a8f0d48c3a90e177f7b22292a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/4a13a1edae824250a5cee897a2f8e767] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=47.8 K 2024-12-10T14:27:34,213 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 743618e951d34d248bde437dd64ce2b2, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733840849279 2024-12-10T14:27:34,213 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d763ab7372146edab2ddda4dfee3ab7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733840849919 2024-12-10T14:27:34,214 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting ca70733a8f0d48c3a90e177f7b22292a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=196, earliestPutTs=1733840851473 2024-12-10T14:27:34,214 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a13a1edae824250a5cee897a2f8e767, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733840852611 2024-12-10T14:27:34,221 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#C#compaction#389 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:34,221 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/b12231ed1d834f648f065ab6dc392374 is 50, key is test_row_0/C:col10/1733840852611/Put/seqid=0 2024-12-10T14:27:34,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742290_1466 (size=12629) 2024-12-10T14:27:34,229 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/b12231ed1d834f648f065ab6dc392374 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/b12231ed1d834f648f065ab6dc392374 2024-12-10T14:27:34,232 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/C of d2de72cdb20a8dd601845b8e001f941b into b12231ed1d834f648f065ab6dc392374(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
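Both compactions above report through PressureAwareThroughputController that they averaged about 3.28 MB/second against a 50.00 MB/second limit and never slept. The core idea is simple pacing: track bytes written and sleep whenever the observed rate would exceed the limit. A rough, self-contained sketch of that idea follows (the real controller also scales the limit with flush and compaction pressure):

    // Rough pacing sketch in the spirit of PressureAwareThroughputController:
    // sleep whenever the bytes written so far outrun the configured throughput limit.
    public class ThroughputLimiterSketch {
      private final double maxBytesPerSecond;
      private final long startNanos = System.nanoTime();
      private long bytesWritten;

      ThroughputLimiterSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
      }

      void control(long deltaBytes) throws InterruptedException {
        bytesWritten += deltaBytes;
        double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
        double minimumSeconds = bytesWritten / maxBytesPerSecond; // time needed at the limit
        double sleepSeconds = minimumSeconds - elapsedSeconds;
        if (sleepSeconds > 0) {
          Thread.sleep((long) (sleepSeconds * 1000)); // pace the writer down to the limit
        }
      }

      public static void main(String[] args) throws InterruptedException {
        ThroughputLimiterSketch limiter = new ThroughputLimiterSketch(50.0 * 1024 * 1024); // 50 MB/s, as above
        for (int i = 0; i < 16; i++) {
          limiter.control(64 * 1024); // pretend a 64 KB block was just written
        }
      }
    }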
2024-12-10T14:27:34,232 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:34,232 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/C, priority=12, startTime=1733840854187; duration=0sec 2024-12-10T14:27:34,232 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:34,232 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:C 2024-12-10T14:27:34,239 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:34,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-10T14:27:34,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:34,245 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T14:27:34,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:34,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:34,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:34,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:34,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:34,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:34,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/a34994738f454dd08953eb19a801ac39 is 50, key is test_row_0/A:col10/1733840852959/Put/seqid=0 2024-12-10T14:27:34,252 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742291_1467 (size=12151) 2024-12-10T14:27:34,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-10T14:27:34,607 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/b86a60130e4f4e1a827814ae858539db as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/b86a60130e4f4e1a827814ae858539db 2024-12-10T14:27:34,611 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/A of d2de72cdb20a8dd601845b8e001f941b into b86a60130e4f4e1a827814ae858539db(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:27:34,611 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:34,611 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/A, priority=12, startTime=1733840854187; duration=0sec 2024-12-10T14:27:34,611 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:34,611 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:A 2024-12-10T14:27:34,653 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/a34994738f454dd08953eb19a801ac39 2024-12-10T14:27:34,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/8ea40c2bf17445bc8b76f5e291579e8c is 50, key is test_row_0/B:col10/1733840852959/Put/seqid=0 2024-12-10T14:27:34,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742292_1468 (size=12151) 2024-12-10T14:27:35,064 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/8ea40c2bf17445bc8b76f5e291579e8c 2024-12-10T14:27:35,072 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/94aa1adbfb0e474fbd84ee97b2481a83 is 50, key is test_row_0/C:col10/1733840852959/Put/seqid=0 2024-12-10T14:27:35,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742293_1469 (size=12151) 2024-12-10T14:27:35,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:35,103 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:35,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:35,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840915114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:35,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:35,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840915115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:35,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:35,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840915117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:35,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:35,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840915219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:35,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:35,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840915219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:35,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:35,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840915221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:35,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:35,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840915424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:35,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:35,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840915424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:35,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:35,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840915424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:35,476 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/94aa1adbfb0e474fbd84ee97b2481a83 2024-12-10T14:27:35,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/a34994738f454dd08953eb19a801ac39 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a34994738f454dd08953eb19a801ac39 2024-12-10T14:27:35,482 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a34994738f454dd08953eb19a801ac39, entries=150, sequenceid=234, filesize=11.9 K 2024-12-10T14:27:35,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/8ea40c2bf17445bc8b76f5e291579e8c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/8ea40c2bf17445bc8b76f5e291579e8c 2024-12-10T14:27:35,485 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/8ea40c2bf17445bc8b76f5e291579e8c, entries=150, sequenceid=234, filesize=11.9 K 2024-12-10T14:27:35,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/94aa1adbfb0e474fbd84ee97b2481a83 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/94aa1adbfb0e474fbd84ee97b2481a83 2024-12-10T14:27:35,489 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/94aa1adbfb0e474fbd84ee97b2481a83, entries=150, sequenceid=234, filesize=11.9 K 2024-12-10T14:27:35,489 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for d2de72cdb20a8dd601845b8e001f941b in 1244ms, sequenceid=234, compaction requested=false 2024-12-10T14:27:35,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:35,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
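The RegionTooBusyException records above are the region server's write back-pressure path: checkResources() rejects incoming Puts once the region's memstore passes its blocking limit (512.0 K in this test run), and the WARN/DEBUG pairs keep repeating until the in-flight flush, which finishes here after ~1.2 s, drains the memstore. Below is a minimal client-side sketch of how a writer might absorb that back-pressure; the table name and column family are taken from the log, while the row value, retry count, and backoff numbers are illustrative assumptions. Depending on client retry settings the server-side exception may surface directly or wrapped, so the sketch checks both.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;                       // illustrative starting backoff
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);                     // rejected while the memstore is over its blocking limit
                    break;
                } catch (IOException e) {
                    // The server-side RegionTooBusyException may arrive directly or as the cause
                    // of a client-side retry exception; anything else is a real error.
                    boolean busy = e instanceof RegionTooBusyException
                        || e.getCause() instanceof RegionTooBusyException;
                    if (!busy) {
                        throw e;
                    }
                    Thread.sleep(backoffMs);            // give the flush time to drain the memstore
                    backoffMs = Math.min(backoffMs * 2, 5_000);  // capped exponential backoff
                }
            }
        }
    }
}
```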
2024-12-10T14:27:35,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-10T14:27:35,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-10T14:27:35,491 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-10T14:27:35,491 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0130 sec 2024-12-10T14:27:35,493 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 2.0170 sec 2024-12-10T14:27:35,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-10T14:27:35,580 INFO [Thread-1899 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-10T14:27:35,581 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:27:35,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-12-10T14:27:35,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T14:27:35,583 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:27:35,583 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:27:35,583 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:27:35,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T14:27:35,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:35,729 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-10T14:27:35,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:35,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:35,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:35,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:35,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:35,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:35,734 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:35,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/a05aa5fa9cdc420c9885c371508a159b is 50, key is test_row_0/A:col10/1733840855116/Put/seqid=0 2024-12-10T14:27:35,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-10T14:27:35,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:35,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:35,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:35,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
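The pid=136/137 procedures above come from a second client-requested table flush racing with the MemStoreFlusher-triggered flush already running on the region: FlushRegionCallable refuses to start a concurrent flush ("NOT flushing ... as already flushing"), reports the failure back as a RemoteProcedureException, and the master re-dispatches pid=137 until the running flush completes. The 512.0 K blocking limit itself is a test-sized setting; in a stock deployment the per-region blocking size is derived from the memstore flush size and the block multiplier. The sketch below shows, under those assumptions, how a test might shrink the limit and then request a table flush the way the log's client does; the concrete sizes are illustrative, not read from this test's configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SmallMemstoreFlushExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Blocking limit ~= flush size * block multiplier. A 128 KB flush size with a
        // multiplier of 4 would block writers at ~512 KB, matching the limit in the log.
        // (These values are assumptions for illustration.)
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Admin.flush submits a FlushTableProcedure on the master, which fans out a
            // FlushRegionProcedure per region (pids 134/135 and 136/137 in the log).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```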
2024-12-10T14:27:35,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:35,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:35,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742294_1470 (size=12151) 2024-12-10T14:27:35,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:35,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840915756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:35,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:35,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840915760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:35,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:35,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840915762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:35,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:35,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840915863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:35,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:35,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840915865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:35,871 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:35,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840915868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:35,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T14:27:35,886 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:35,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-10T14:27:35,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:35,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:35,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:35,887 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:35,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:35,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,039 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:36,039 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-10T14:27:36,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:36,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:36,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:36,040 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:36,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840916066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:36,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:36,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840916070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:36,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:36,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840916073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:36,139 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/a05aa5fa9cdc420c9885c371508a159b 2024-12-10T14:27:36,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/92eddecfaa3a4a62826864c0e81bbb97 is 50, key is test_row_0/B:col10/1733840855116/Put/seqid=0 2024-12-10T14:27:36,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742295_1471 (size=12151) 2024-12-10T14:27:36,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T14:27:36,191 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:36,192 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-10T14:27:36,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:36,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
as already flushing 2024-12-10T14:27:36,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:36,192 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,344 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:36,344 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-10T14:27:36,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:36,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:36,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:36,345 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:36,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840916369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:36,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:36,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840916374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:36,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:36,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840916380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:36,496 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:36,497 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-10T14:27:36,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:36,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:36,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:36,498 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/92eddecfaa3a4a62826864c0e81bbb97 2024-12-10T14:27:36,567 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/6ecc9681580d477b8a2fc6e6f6b0dbe0 is 50, key is test_row_0/C:col10/1733840855116/Put/seqid=0 2024-12-10T14:27:36,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742296_1472 (size=12151) 2024-12-10T14:27:36,649 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:36,650 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-10T14:27:36,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:36,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:36,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:36,650 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T14:27:36,802 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:36,802 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-10T14:27:36,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:36,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:36,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:36,803 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:36,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840916876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:36,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:36,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840916881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:36,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:36,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840916887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:36,955 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:36,955 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-10T14:27:36,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:36,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:36,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:36,955 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:36,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/6ecc9681580d477b8a2fc6e6f6b0dbe0 2024-12-10T14:27:36,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/a05aa5fa9cdc420c9885c371508a159b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a05aa5fa9cdc420c9885c371508a159b 2024-12-10T14:27:36,985 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a05aa5fa9cdc420c9885c371508a159b, entries=150, sequenceid=249, filesize=11.9 K 2024-12-10T14:27:36,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/92eddecfaa3a4a62826864c0e81bbb97 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/92eddecfaa3a4a62826864c0e81bbb97 2024-12-10T14:27:36,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/92eddecfaa3a4a62826864c0e81bbb97, entries=150, sequenceid=249, filesize=11.9 K 2024-12-10T14:27:36,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/6ecc9681580d477b8a2fc6e6f6b0dbe0 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/6ecc9681580d477b8a2fc6e6f6b0dbe0 2024-12-10T14:27:36,993 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/6ecc9681580d477b8a2fc6e6f6b0dbe0, entries=150, sequenceid=249, filesize=11.9 K 2024-12-10T14:27:36,993 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d2de72cdb20a8dd601845b8e001f941b in 1264ms, sequenceid=249, compaction requested=true 2024-12-10T14:27:36,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:36,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:27:36,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:36,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:27:36,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:36,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:27:36,994 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:36,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T14:27:36,994 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:36,995 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:36,995 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:36,995 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/B is initiating minor compaction (all files) 2024-12-10T14:27:36,995 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/A is initiating minor compaction (all files) 2024-12-10T14:27:36,995 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/B in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:36,995 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/A in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:36,995 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/fede36cd60d74c6f91fedd65441327d8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/8ea40c2bf17445bc8b76f5e291579e8c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/92eddecfaa3a4a62826864c0e81bbb97] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=36.1 K 2024-12-10T14:27:36,995 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/b86a60130e4f4e1a827814ae858539db, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a34994738f454dd08953eb19a801ac39, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a05aa5fa9cdc420c9885c371508a159b] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=36.1 K 2024-12-10T14:27:36,995 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting fede36cd60d74c6f91fedd65441327d8, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733840852611 2024-12-10T14:27:36,995 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b86a60130e4f4e1a827814ae858539db, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733840852611 2024-12-10T14:27:36,995 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ea40c2bf17445bc8b76f5e291579e8c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733840852959 2024-12-10T14:27:36,995 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting a34994738f454dd08953eb19a801ac39, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733840852959 2024-12-10T14:27:36,996 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 92eddecfaa3a4a62826864c0e81bbb97, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733840855109 2024-12-10T14:27:36,996 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting a05aa5fa9cdc420c9885c371508a159b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733840855109 2024-12-10T14:27:37,002 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#A#compaction#396 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:37,002 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#B#compaction#397 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:37,003 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/b043dde992774cfb8b47642dd4c0129d is 50, key is test_row_0/A:col10/1733840855116/Put/seqid=0 2024-12-10T14:27:37,003 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/34f9134fbdde428cad873179675c6411 is 50, key is test_row_0/B:col10/1733840855116/Put/seqid=0 2024-12-10T14:27:37,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742298_1474 (size=12731) 2024-12-10T14:27:37,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742297_1473 (size=12731) 2024-12-10T14:27:37,107 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:37,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-10T14:27:37,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:37,108 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T14:27:37,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:37,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:37,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:37,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:37,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:37,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:37,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/09caf8c8bb56444685de64df11647a38 is 50, key is test_row_0/A:col10/1733840855760/Put/seqid=0 2024-12-10T14:27:37,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742299_1475 (size=12301) 2024-12-10T14:27:37,424 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/b043dde992774cfb8b47642dd4c0129d as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/b043dde992774cfb8b47642dd4c0129d 2024-12-10T14:27:37,424 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/34f9134fbdde428cad873179675c6411 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/34f9134fbdde428cad873179675c6411 2024-12-10T14:27:37,428 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/A of d2de72cdb20a8dd601845b8e001f941b into b043dde992774cfb8b47642dd4c0129d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:27:37,429 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:37,429 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/A, priority=13, startTime=1733840856994; duration=0sec 2024-12-10T14:27:37,429 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:37,429 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:A 2024-12-10T14:27:37,429 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:37,429 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/B of d2de72cdb20a8dd601845b8e001f941b into 34f9134fbdde428cad873179675c6411(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:27:37,429 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:37,429 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/B, priority=13, startTime=1733840856994; duration=0sec 2024-12-10T14:27:37,429 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:37,429 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:B 2024-12-10T14:27:37,430 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:37,430 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/C is initiating minor compaction (all files) 2024-12-10T14:27:37,430 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/C in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:37,430 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/b12231ed1d834f648f065ab6dc392374, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/94aa1adbfb0e474fbd84ee97b2481a83, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/6ecc9681580d477b8a2fc6e6f6b0dbe0] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=36.1 K 2024-12-10T14:27:37,430 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b12231ed1d834f648f065ab6dc392374, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733840852611 2024-12-10T14:27:37,431 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94aa1adbfb0e474fbd84ee97b2481a83, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733840852959 2024-12-10T14:27:37,431 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ecc9681580d477b8a2fc6e6f6b0dbe0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733840855109 2024-12-10T14:27:37,436 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#C#compaction#399 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:37,437 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/8a717162bd8c4fa8955a18fb9ae06b8e is 50, key is test_row_0/C:col10/1733840855116/Put/seqid=0 2024-12-10T14:27:37,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742300_1476 (size=12731) 2024-12-10T14:27:37,451 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/8a717162bd8c4fa8955a18fb9ae06b8e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/8a717162bd8c4fa8955a18fb9ae06b8e 2024-12-10T14:27:37,455 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/C of d2de72cdb20a8dd601845b8e001f941b into 8a717162bd8c4fa8955a18fb9ae06b8e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:27:37,455 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:37,455 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/C, priority=13, startTime=1733840856994; duration=0sec 2024-12-10T14:27:37,455 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:37,455 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:C 2024-12-10T14:27:37,518 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/09caf8c8bb56444685de64df11647a38 2024-12-10T14:27:37,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/c5b2ff95e2254476b5c6d264998fc61a is 50, key is test_row_0/B:col10/1733840855760/Put/seqid=0 2024-12-10T14:27:37,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742301_1477 (size=12301) 2024-12-10T14:27:37,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T14:27:37,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:37,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:37,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:37,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840917905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:37,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:37,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840917906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:37,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:37,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840917910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:37,927 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/c5b2ff95e2254476b5c6d264998fc61a 2024-12-10T14:27:37,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/f4ccb12387ce405cbdb1e0da5695d312 is 50, key is test_row_0/C:col10/1733840855760/Put/seqid=0 2024-12-10T14:27:37,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742302_1478 (size=12301) 2024-12-10T14:27:38,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840918012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840918012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840918016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52312 deadline: 1733840918068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,070 DEBUG [Thread-1891 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., hostname=db1d50717577,46699,1733840717757, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T14:27:38,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52316 deadline: 1733840918079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,084 DEBUG [Thread-1897 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8165 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., hostname=db1d50717577,46699,1733840717757, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T14:27:38,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840918219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840918219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840918220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,338 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/f4ccb12387ce405cbdb1e0da5695d312 2024-12-10T14:27:38,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/09caf8c8bb56444685de64df11647a38 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/09caf8c8bb56444685de64df11647a38 2024-12-10T14:27:38,352 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/09caf8c8bb56444685de64df11647a38, entries=150, sequenceid=271, filesize=12.0 K 2024-12-10T14:27:38,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/c5b2ff95e2254476b5c6d264998fc61a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c5b2ff95e2254476b5c6d264998fc61a 2024-12-10T14:27:38,356 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c5b2ff95e2254476b5c6d264998fc61a, entries=150, sequenceid=271, filesize=12.0 K 2024-12-10T14:27:38,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/f4ccb12387ce405cbdb1e0da5695d312 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/f4ccb12387ce405cbdb1e0da5695d312 2024-12-10T14:27:38,361 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/f4ccb12387ce405cbdb1e0da5695d312, entries=150, sequenceid=271, filesize=12.0 K 2024-12-10T14:27:38,362 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for d2de72cdb20a8dd601845b8e001f941b in 1254ms, sequenceid=271, compaction requested=false 2024-12-10T14:27:38,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:38,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:38,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-12-10T14:27:38,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-12-10T14:27:38,367 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-10T14:27:38,367 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7810 sec 2024-12-10T14:27:38,368 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 2.7860 sec 2024-12-10T14:27:38,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:38,526 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-10T14:27:38,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:38,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:38,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:38,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:38,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:38,528 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:38,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/560f3934cee44f548c1048ac1c9b3d13 is 50, key is test_row_0/A:col10/1733840857908/Put/seqid=0 2024-12-10T14:27:38,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742303_1479 (size=14741) 2024-12-10T14:27:38,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840918557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840918558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840918566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840918667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840918668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840918674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840918872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840918873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:38,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840918879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:38,936 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/560f3934cee44f548c1048ac1c9b3d13 2024-12-10T14:27:38,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/fa42a17cdfdc489c92213405806a347a is 50, key is test_row_0/B:col10/1733840857908/Put/seqid=0 2024-12-10T14:27:38,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742304_1480 (size=12301) 2024-12-10T14:27:39,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:39,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840919179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:39,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:39,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840919179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:39,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:39,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840919184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:39,350 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/fa42a17cdfdc489c92213405806a347a 2024-12-10T14:27:39,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/4ce0892861e145fbb01f54f8eeb4fdf8 is 50, key is test_row_0/C:col10/1733840857908/Put/seqid=0 2024-12-10T14:27:39,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742305_1481 (size=12301) 2024-12-10T14:27:39,372 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/4ce0892861e145fbb01f54f8eeb4fdf8 2024-12-10T14:27:39,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/560f3934cee44f548c1048ac1c9b3d13 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/560f3934cee44f548c1048ac1c9b3d13 2024-12-10T14:27:39,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/560f3934cee44f548c1048ac1c9b3d13, entries=200, sequenceid=290, filesize=14.4 K 2024-12-10T14:27:39,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/fa42a17cdfdc489c92213405806a347a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/fa42a17cdfdc489c92213405806a347a 2024-12-10T14:27:39,384 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/fa42a17cdfdc489c92213405806a347a, entries=150, sequenceid=290, filesize=12.0 K 2024-12-10T14:27:39,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/4ce0892861e145fbb01f54f8eeb4fdf8 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/4ce0892861e145fbb01f54f8eeb4fdf8 2024-12-10T14:27:39,389 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/4ce0892861e145fbb01f54f8eeb4fdf8, entries=150, sequenceid=290, filesize=12.0 K 2024-12-10T14:27:39,390 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for d2de72cdb20a8dd601845b8e001f941b in 864ms, sequenceid=290, compaction requested=true 2024-12-10T14:27:39,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:39,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:27:39,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:39,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:27:39,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:39,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:27:39,390 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:39,390 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:39,390 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 
store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:39,391 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39773 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:39,391 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/A is initiating minor compaction (all files) 2024-12-10T14:27:39,391 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/A in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:39,391 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/b043dde992774cfb8b47642dd4c0129d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/09caf8c8bb56444685de64df11647a38, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/560f3934cee44f548c1048ac1c9b3d13] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=38.8 K 2024-12-10T14:27:39,391 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:39,391 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/B is initiating minor compaction (all files) 2024-12-10T14:27:39,391 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/B in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:39,392 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/34f9134fbdde428cad873179675c6411, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c5b2ff95e2254476b5c6d264998fc61a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/fa42a17cdfdc489c92213405806a347a] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=36.5 K 2024-12-10T14:27:39,392 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b043dde992774cfb8b47642dd4c0129d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733840855109 2024-12-10T14:27:39,392 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 34f9134fbdde428cad873179675c6411, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733840855109 2024-12-10T14:27:39,392 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 09caf8c8bb56444685de64df11647a38, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733840855752 2024-12-10T14:27:39,392 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting c5b2ff95e2254476b5c6d264998fc61a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733840855752 2024-12-10T14:27:39,393 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 560f3934cee44f548c1048ac1c9b3d13, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733840857895 2024-12-10T14:27:39,393 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting fa42a17cdfdc489c92213405806a347a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733840857908 2024-12-10T14:27:39,401 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#A#compaction#405 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:39,402 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/39c5c863e72c4c26a5b6e5d8d379db0f is 50, key is test_row_0/A:col10/1733840857908/Put/seqid=0 2024-12-10T14:27:39,404 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#B#compaction#406 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:39,405 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/ca0b175e4bab402984cd203a7909b86f is 50, key is test_row_0/B:col10/1733840857908/Put/seqid=0 2024-12-10T14:27:39,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742306_1482 (size=12983) 2024-12-10T14:27:39,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742307_1483 (size=12983) 2024-12-10T14:27:39,417 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/ca0b175e4bab402984cd203a7909b86f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/ca0b175e4bab402984cd203a7909b86f 2024-12-10T14:27:39,421 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/39c5c863e72c4c26a5b6e5d8d379db0f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/39c5c863e72c4c26a5b6e5d8d379db0f 2024-12-10T14:27:39,423 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/B of d2de72cdb20a8dd601845b8e001f941b into ca0b175e4bab402984cd203a7909b86f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:27:39,423 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:39,423 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/B, priority=13, startTime=1733840859390; duration=0sec 2024-12-10T14:27:39,423 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:39,423 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:B 2024-12-10T14:27:39,423 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:39,424 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:39,424 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/C is initiating minor compaction (all files) 2024-12-10T14:27:39,425 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/C in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:39,425 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/8a717162bd8c4fa8955a18fb9ae06b8e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/f4ccb12387ce405cbdb1e0da5695d312, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/4ce0892861e145fbb01f54f8eeb4fdf8] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=36.5 K 2024-12-10T14:27:39,426 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/A of d2de72cdb20a8dd601845b8e001f941b into 39c5c863e72c4c26a5b6e5d8d379db0f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:27:39,426 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:39,426 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/A, priority=13, startTime=1733840859390; duration=0sec 2024-12-10T14:27:39,426 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:39,426 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:A 2024-12-10T14:27:39,426 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a717162bd8c4fa8955a18fb9ae06b8e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1733840855109 2024-12-10T14:27:39,426 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting f4ccb12387ce405cbdb1e0da5695d312, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733840855752 2024-12-10T14:27:39,427 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ce0892861e145fbb01f54f8eeb4fdf8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733840857908 2024-12-10T14:27:39,435 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#C#compaction#407 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:39,436 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/a075be228fc740018c37747b1d489eb4 is 50, key is test_row_0/C:col10/1733840857908/Put/seqid=0 2024-12-10T14:27:39,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742308_1484 (size=12983) 2024-12-10T14:27:39,444 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/a075be228fc740018c37747b1d489eb4 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/a075be228fc740018c37747b1d489eb4 2024-12-10T14:27:39,449 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/C of d2de72cdb20a8dd601845b8e001f941b into a075be228fc740018c37747b1d489eb4(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:27:39,449 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:39,449 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/C, priority=13, startTime=1733840859390; duration=0sec 2024-12-10T14:27:39,449 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:39,449 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:C 2024-12-10T14:27:39,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:39,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-10T14:27:39,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:39,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:39,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:39,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:39,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:39,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:39,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T14:27:39,688 INFO [Thread-1899 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-10T14:27:39,689 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:27:39,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/d92ffe2ab3874907b826f489648f0ac6 is 50, key is test_row_0/A:col10/1733840858565/Put/seqid=0 2024-12-10T14:27:39,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-12-10T14:27:39,695 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:27:39,695 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T14:27:39,696 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:27:39,696 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:27:39,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742309_1485 (size=12301) 2024-12-10T14:27:39,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/d92ffe2ab3874907b826f489648f0ac6 2024-12-10T14:27:39,708 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/bc589bf3802841b49134645c97904afe is 50, key is test_row_0/B:col10/1733840858565/Put/seqid=0 2024-12-10T14:27:39,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:39,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840919705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:39,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:39,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840919708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:39,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:39,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840919709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:39,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742310_1486 (size=12301) 2024-12-10T14:27:39,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T14:27:39,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:39,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840919810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:39,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:39,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840919811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:39,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:39,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840919813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:39,848 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:39,849 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-10T14:27:39,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:39,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:39,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:39,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:39,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:39,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:39,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T14:27:40,001 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:40,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-10T14:27:40,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:40,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:40,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:40,002 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:40,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:40,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:40,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840920014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:40,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:40,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840920015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:40,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:40,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840920015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:40,126 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/bc589bf3802841b49134645c97904afe 2024-12-10T14:27:40,133 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/3c7f9fab372e47679c394445a1c1602c is 50, key is test_row_0/C:col10/1733840858565/Put/seqid=0 2024-12-10T14:27:40,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742311_1487 (size=12301) 2024-12-10T14:27:40,153 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:40,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-10T14:27:40,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:40,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:40,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:40,154 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:40,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:40,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:40,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T14:27:40,305 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:40,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-10T14:27:40,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:40,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:40,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:40,306 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:40,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:40,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:40,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:40,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840920318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:40,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:40,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840920319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:40,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:40,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840920321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:40,458 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:40,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-10T14:27:40,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:40,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:40,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:40,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:40,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:40,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:40,537 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/3c7f9fab372e47679c394445a1c1602c 2024-12-10T14:27:40,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/d92ffe2ab3874907b826f489648f0ac6 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/d92ffe2ab3874907b826f489648f0ac6 2024-12-10T14:27:40,544 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/d92ffe2ab3874907b826f489648f0ac6, entries=150, sequenceid=314, filesize=12.0 K 2024-12-10T14:27:40,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/bc589bf3802841b49134645c97904afe as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/bc589bf3802841b49134645c97904afe 2024-12-10T14:27:40,548 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/bc589bf3802841b49134645c97904afe, entries=150, sequenceid=314, filesize=12.0 K 2024-12-10T14:27:40,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/3c7f9fab372e47679c394445a1c1602c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/3c7f9fab372e47679c394445a1c1602c 2024-12-10T14:27:40,551 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/3c7f9fab372e47679c394445a1c1602c, entries=150, sequenceid=314, filesize=12.0 K 2024-12-10T14:27:40,552 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for d2de72cdb20a8dd601845b8e001f941b in 866ms, sequenceid=314, compaction requested=false 2024-12-10T14:27:40,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:40,611 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:40,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-10T14:27:40,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:40,611 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-10T14:27:40,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:40,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:40,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:40,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:40,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:40,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:40,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/0a69231441c34d79ac440f4e0b5ca5e4 is 50, key is test_row_0/A:col10/1733840859707/Put/seqid=0 2024-12-10T14:27:40,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742312_1488 (size=12301) 2024-12-10T14:27:40,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T14:27:40,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:40,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. as already flushing 2024-12-10T14:27:40,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:40,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840920857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:40,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:40,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840920860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:40,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:40,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840920864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:40,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:40,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840920965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:40,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:40,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840920966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:40,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:40,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840920970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:41,019 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/0a69231441c34d79ac440f4e0b5ca5e4 2024-12-10T14:27:41,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/7317998134a7440faf239bde15ffba33 is 50, key is test_row_0/B:col10/1733840859707/Put/seqid=0 2024-12-10T14:27:41,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742313_1489 (size=12301) 2024-12-10T14:27:41,066 DEBUG [Thread-1904 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2405c04e to 127.0.0.1:58494 2024-12-10T14:27:41,066 DEBUG [Thread-1904 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:41,068 DEBUG [Thread-1908 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x593af048 to 127.0.0.1:58494 2024-12-10T14:27:41,068 DEBUG [Thread-1908 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:41,069 DEBUG [Thread-1906 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x73d92042 to 127.0.0.1:58494 2024-12-10T14:27:41,069 DEBUG [Thread-1906 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:41,071 DEBUG [Thread-1902 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3652e74d 
to 127.0.0.1:58494 2024-12-10T14:27:41,071 DEBUG [Thread-1902 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:41,073 DEBUG [Thread-1900 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x496fe03f to 127.0.0.1:58494 2024-12-10T14:27:41,073 DEBUG [Thread-1900 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:41,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:41,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:41,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840921171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:41,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840921171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:41,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:41,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840921172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:41,430 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/7317998134a7440faf239bde15ffba33 2024-12-10T14:27:41,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/f5194c2243e14fc68f4646791967cc1b is 50, key is test_row_0/C:col10/1733840859707/Put/seqid=0 2024-12-10T14:27:41,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742314_1490 (size=12301) 2024-12-10T14:27:41,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:41,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:41,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52274 deadline: 1733840921472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:41,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52260 deadline: 1733840921472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:41,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:41,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52300 deadline: 1733840921475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:41,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T14:27:41,838 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/f5194c2243e14fc68f4646791967cc1b 2024-12-10T14:27:41,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/0a69231441c34d79ac440f4e0b5ca5e4 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/0a69231441c34d79ac440f4e0b5ca5e4 2024-12-10T14:27:41,843 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/0a69231441c34d79ac440f4e0b5ca5e4, entries=150, sequenceid=329, filesize=12.0 K 2024-12-10T14:27:41,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/7317998134a7440faf239bde15ffba33 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/7317998134a7440faf239bde15ffba33 2024-12-10T14:27:41,846 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/7317998134a7440faf239bde15ffba33, entries=150, sequenceid=329, filesize=12.0 K 2024-12-10T14:27:41,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/f5194c2243e14fc68f4646791967cc1b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/f5194c2243e14fc68f4646791967cc1b 2024-12-10T14:27:41,850 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/f5194c2243e14fc68f4646791967cc1b, entries=150, sequenceid=329, filesize=12.0 K 2024-12-10T14:27:41,850 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for d2de72cdb20a8dd601845b8e001f941b in 1239ms, sequenceid=329, compaction requested=true 2024-12-10T14:27:41,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:41,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
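The RegionTooBusyException warnings above are the region server pushing back while the memstore of d2de72cdb20a8dd601845b8e001f941b sits over its 512.0 K blocking limit; once the flush (pid=139) drains it, writes are accepted again. A minimal client-side sketch of backing off and retrying a put in that situation follows. The table, family and qualifier come from the log, but the retry count and backoff values are illustrative, and depending on client retry settings the exception may surface wrapped in a RetriesExhaustedException rather than directly as assumed here.

```java
// Illustrative sketch only: retry a put with backoff when the server rejects it
// with RegionTooBusyException while a flush is draining the memstore.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                // starting backoff, chosen for the example
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                  // may be rejected while the region is too busy
          break;                           // write accepted
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs);         // give the flush time to drain the memstore
          backoffMs *= 2;                  // exponential backoff between attempts
        }
      }
    }
  }
}
```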
2024-12-10T14:27:41,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-10T14:27:41,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-12-10T14:27:41,852 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-10T14:27:41,852 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1550 sec 2024-12-10T14:27:41,853 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 2.1640 sec 2024-12-10T14:27:41,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:41,975 DEBUG [Thread-1893 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ed69825 to 127.0.0.1:58494 2024-12-10T14:27:41,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T14:27:41,976 DEBUG [Thread-1893 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:41,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:41,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:41,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:41,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:41,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:41,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:41,977 DEBUG [Thread-1889 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0de9f076 to 127.0.0.1:58494 2024-12-10T14:27:41,977 DEBUG [Thread-1889 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:41,979 DEBUG [Thread-1895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11193a0c to 127.0.0.1:58494 2024-12-10T14:27:41,979 DEBUG [Thread-1895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:41,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/a324a06a80ca43fe9c54912b409ebe7e is 50, key is test_row_0/A:col10/1733840860863/Put/seqid=0 2024-12-10T14:27:41,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742315_1491 (size=12301) 2024-12-10T14:27:42,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=351 (bloomFilter=true), 
to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/a324a06a80ca43fe9c54912b409ebe7e 2024-12-10T14:27:42,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/f80bfba7589a4296a9713e03f793c92c is 50, key is test_row_0/B:col10/1733840860863/Put/seqid=0 2024-12-10T14:27:42,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742316_1492 (size=12301) 2024-12-10T14:27:42,792 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/f80bfba7589a4296a9713e03f793c92c 2024-12-10T14:27:42,797 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/121327236d9c485bbf63845e321d7312 is 50, key is test_row_0/C:col10/1733840860863/Put/seqid=0 2024-12-10T14:27:42,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742317_1493 (size=12301) 2024-12-10T14:27:43,201 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/121327236d9c485bbf63845e321d7312 2024-12-10T14:27:43,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/a324a06a80ca43fe9c54912b409ebe7e as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a324a06a80ca43fe9c54912b409ebe7e 2024-12-10T14:27:43,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a324a06a80ca43fe9c54912b409ebe7e, entries=150, sequenceid=351, filesize=12.0 K 2024-12-10T14:27:43,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/f80bfba7589a4296a9713e03f793c92c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/f80bfba7589a4296a9713e03f793c92c 2024-12-10T14:27:43,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/f80bfba7589a4296a9713e03f793c92c, entries=150, 
sequenceid=351, filesize=12.0 K 2024-12-10T14:27:43,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/121327236d9c485bbf63845e321d7312 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/121327236d9c485bbf63845e321d7312 2024-12-10T14:27:43,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/121327236d9c485bbf63845e321d7312, entries=150, sequenceid=351, filesize=12.0 K 2024-12-10T14:27:43,213 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=13.42 KB/13740 for d2de72cdb20a8dd601845b8e001f941b in 1238ms, sequenceid=351, compaction requested=true 2024-12-10T14:27:43,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:43,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:27:43,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:43,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:27:43,213 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:27:43,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:43,213 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:27:43,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d2de72cdb20a8dd601845b8e001f941b:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:27:43,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:43,214 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:27:43,214 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:27:43,214 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] 
regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/A is initiating minor compaction (all files) 2024-12-10T14:27:43,214 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/B is initiating minor compaction (all files) 2024-12-10T14:27:43,214 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/A in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:43,214 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/B in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:43,214 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/39c5c863e72c4c26a5b6e5d8d379db0f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/d92ffe2ab3874907b826f489648f0ac6, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/0a69231441c34d79ac440f4e0b5ca5e4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a324a06a80ca43fe9c54912b409ebe7e] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=48.7 K 2024-12-10T14:27:43,214 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/ca0b175e4bab402984cd203a7909b86f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/bc589bf3802841b49134645c97904afe, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/7317998134a7440faf239bde15ffba33, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/f80bfba7589a4296a9713e03f793c92c] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=48.7 K 2024-12-10T14:27:43,214 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39c5c863e72c4c26a5b6e5d8d379db0f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733840857908 2024-12-10T14:27:43,214 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting ca0b175e4bab402984cd203a7909b86f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733840857908 2024-12-10T14:27:43,214 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting d92ffe2ab3874907b826f489648f0ac6, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1733840858557 2024-12-10T14:27:43,214 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting bc589bf3802841b49134645c97904afe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1733840858557 2024-12-10T14:27:43,215 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a69231441c34d79ac440f4e0b5ca5e4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1733840859700 2024-12-10T14:27:43,215 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 7317998134a7440faf239bde15ffba33, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1733840859700 2024-12-10T14:27:43,215 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting a324a06a80ca43fe9c54912b409ebe7e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1733840860856 2024-12-10T14:27:43,215 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting f80bfba7589a4296a9713e03f793c92c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1733840860856 2024-12-10T14:27:43,222 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#A#compaction#417 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:43,222 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#B#compaction#418 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:43,222 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/a7f80c95e115489bb8100eb85d47f3a9 is 50, key is test_row_0/A:col10/1733840860863/Put/seqid=0 2024-12-10T14:27:43,222 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/80f08b0da69e4418a32f7077e34a9eae is 50, key is test_row_0/B:col10/1733840860863/Put/seqid=0 2024-12-10T14:27:43,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742318_1494 (size=13119) 2024-12-10T14:27:43,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742319_1495 (size=13119) 2024-12-10T14:27:43,629 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/80f08b0da69e4418a32f7077e34a9eae as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/80f08b0da69e4418a32f7077e34a9eae 2024-12-10T14:27:43,629 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/a7f80c95e115489bb8100eb85d47f3a9 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a7f80c95e115489bb8100eb85d47f3a9 2024-12-10T14:27:43,632 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/B of d2de72cdb20a8dd601845b8e001f941b into 80f08b0da69e4418a32f7077e34a9eae(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:27:43,632 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/A of d2de72cdb20a8dd601845b8e001f941b into a7f80c95e115489bb8100eb85d47f3a9(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
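The compaction selection above (4 eligible store files, 16 blocking, a 50.00 MB/second throughput cap from PressureAwareThroughputController) is governed by a handful of region-server settings. The sketch below shows the corresponding configuration keys; the key names and values are the ones used by recent HBase 2.x releases, so treat them as assumptions to confirm against the version in use.

```java
// Sketch of the region-server settings behind the numbers in the log above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum/maximum number of store files considered for one minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Writes block once a store holds more than this many files ("16 blocking" above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    // Bounds used by the pressure-aware compaction throughput controller; the lower
    // bound (50 MB/s) matches the "total limit" reported while pressure is low.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    System.out.println("blockingStoreFiles = "
        + conf.getInt("hbase.hstore.blockingStoreFiles", -1));
  }
}
```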
2024-12-10T14:27:43,632 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:43,632 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:43,632 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/A, priority=12, startTime=1733840863213; duration=0sec 2024-12-10T14:27:43,632 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/B, priority=12, startTime=1733840863213; duration=0sec 2024-12-10T14:27:43,632 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:43,632 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:A 2024-12-10T14:27:43,632 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:43,632 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:B 2024-12-10T14:27:43,632 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:27:43,633 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:27:43,633 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): d2de72cdb20a8dd601845b8e001f941b/C is initiating minor compaction (all files) 2024-12-10T14:27:43,633 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d2de72cdb20a8dd601845b8e001f941b/C in TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
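The flush procedure that completed earlier (pid=138/139) and the compactions running here can also be requested explicitly from a client. The sketch below is not the test tool's own code; it only shows the Admin calls that drive the same master procedures, with an illustrative polling loop on the table's compaction state.

```java
// Minimal sketch: trigger a flush and a major compaction from a client and wait
// (coarsely) until the region server reports no compaction in progress.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);            // submits a flush like the FlushTableProcedure above
      admin.majorCompact(table);     // asks every store to rewrite all of its files
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);          // illustrative polling interval
      }
    }
  }
}
```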
2024-12-10T14:27:43,633 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/a075be228fc740018c37747b1d489eb4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/3c7f9fab372e47679c394445a1c1602c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/f5194c2243e14fc68f4646791967cc1b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/121327236d9c485bbf63845e321d7312] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp, totalSize=48.7 K 2024-12-10T14:27:43,634 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting a075be228fc740018c37747b1d489eb4, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733840857908 2024-12-10T14:27:43,634 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c7f9fab372e47679c394445a1c1602c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1733840858557 2024-12-10T14:27:43,634 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5194c2243e14fc68f4646791967cc1b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1733840859700 2024-12-10T14:27:43,634 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 121327236d9c485bbf63845e321d7312, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1733840860856 2024-12-10T14:27:43,640 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d2de72cdb20a8dd601845b8e001f941b#C#compaction#419 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:43,640 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/4b8cf9052b874aceb3b4a7d509e1541c is 50, key is test_row_0/C:col10/1733840860863/Put/seqid=0 2024-12-10T14:27:43,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742320_1496 (size=13119) 2024-12-10T14:27:43,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T14:27:43,800 INFO [Thread-1899 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-10T14:27:44,047 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/4b8cf9052b874aceb3b4a7d509e1541c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/4b8cf9052b874aceb3b4a7d509e1541c 2024-12-10T14:27:44,050 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d2de72cdb20a8dd601845b8e001f941b/C of d2de72cdb20a8dd601845b8e001f941b into 4b8cf9052b874aceb3b4a7d509e1541c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:27:44,050 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:44,050 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b., storeName=d2de72cdb20a8dd601845b8e001f941b/C, priority=12, startTime=1733840863213; duration=0sec 2024-12-10T14:27:44,050 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:44,050 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d2de72cdb20a8dd601845b8e001f941b:C 2024-12-10T14:27:46,178 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T14:27:48,097 DEBUG [Thread-1897 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7861b162 to 127.0.0.1:58494 2024-12-10T14:27:48,097 DEBUG [Thread-1897 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:48,129 DEBUG [Thread-1891 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4414259d to 127.0.0.1:58494 2024-12-10T14:27:48,129 DEBUG [Thread-1891 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 86 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 14 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 83 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 81 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 18 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3006 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9018 rows 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2971 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8913 rows 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3000 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9000 rows 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3004 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9012 rows 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2992 2024-12-10T14:27:48,130 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8976 rows 2024-12-10T14:27:48,130 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T14:27:48,130 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2f142b04 to 127.0.0.1:58494 2024-12-10T14:27:48,130 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:27:48,132 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-10T14:27:48,132 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-10T14:27:48,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:48,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-10T14:27:48,135 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840868135"}]},"ts":"1733840868135"} 2024-12-10T14:27:48,136 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-10T14:27:48,138 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-10T14:27:48,139 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T14:27:48,140 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2de72cdb20a8dd601845b8e001f941b, UNASSIGN}] 2024-12-10T14:27:48,141 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2de72cdb20a8dd601845b8e001f941b, UNASSIGN 2024-12-10T14:27:48,142 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=d2de72cdb20a8dd601845b8e001f941b, regionState=CLOSING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:27:48,142 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:27:48,142 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; CloseRegionProcedure d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:27:48,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-10T14:27:48,293 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:48,294 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(124): Close d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:48,294 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:27:48,294 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1681): Closing d2de72cdb20a8dd601845b8e001f941b, disabling compactions & flushes 2024-12-10T14:27:48,294 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:48,294 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 2024-12-10T14:27:48,294 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. after waiting 0 ms 2024-12-10T14:27:48,294 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
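Teardown begins here: the master stores DisableTableProcedure pid=140, which fans out through CloseTableRegionsProcedure (141), TransitRegionStateProcedure (142) and CloseRegionProcedure (143) until the region is flushed and closed. From a client, that whole chain is triggered by a disable call, typically followed by a delete; a sketch using the table name from the log is below, with error handling omitted.

```java
// Sketch of the client calls that drive the disable/drop procedure chain above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableAndDropExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table) && !admin.isTableDisabled(table)) {
        admin.disableTable(table);   // master stores the disable procedure and unassigns regions
      }
      admin.deleteTable(table);      // only legal once the table is disabled
    }
  }
}
```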
2024-12-10T14:27:48,294 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(2837): Flushing d2de72cdb20a8dd601845b8e001f941b 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-10T14:27:48,294 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=A 2024-12-10T14:27:48,294 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:48,294 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=B 2024-12-10T14:27:48,294 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:48,294 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d2de72cdb20a8dd601845b8e001f941b, store=C 2024-12-10T14:27:48,294 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:48,297 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/2aac2abedb3043c89b60b729eabedd06 is 50, key is test_row_0/A:col10/1733840868129/Put/seqid=0 2024-12-10T14:27:48,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742321_1497 (size=9857) 2024-12-10T14:27:48,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-10T14:27:48,701 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/2aac2abedb3043c89b60b729eabedd06 2024-12-10T14:27:48,707 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/8416c23c2bd748ac9c902ad049243937 is 50, key is test_row_0/B:col10/1733840868129/Put/seqid=0 2024-12-10T14:27:48,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742322_1498 (size=9857) 2024-12-10T14:27:48,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-10T14:27:49,110 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 
{event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/8416c23c2bd748ac9c902ad049243937 2024-12-10T14:27:49,116 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/30d3412c9af9494a9fca2a0911cce39c is 50, key is test_row_0/C:col10/1733840868129/Put/seqid=0 2024-12-10T14:27:49,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742323_1499 (size=9857) 2024-12-10T14:27:49,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-10T14:27:49,519 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=361 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/30d3412c9af9494a9fca2a0911cce39c 2024-12-10T14:27:49,522 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/A/2aac2abedb3043c89b60b729eabedd06 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/2aac2abedb3043c89b60b729eabedd06 2024-12-10T14:27:49,525 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/2aac2abedb3043c89b60b729eabedd06, entries=100, sequenceid=361, filesize=9.6 K 2024-12-10T14:27:49,525 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/B/8416c23c2bd748ac9c902ad049243937 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/8416c23c2bd748ac9c902ad049243937 2024-12-10T14:27:49,527 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/8416c23c2bd748ac9c902ad049243937, entries=100, sequenceid=361, filesize=9.6 K 2024-12-10T14:27:49,528 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/.tmp/C/30d3412c9af9494a9fca2a0911cce39c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/30d3412c9af9494a9fca2a0911cce39c 2024-12-10T14:27:49,530 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/30d3412c9af9494a9fca2a0911cce39c, entries=100, sequenceid=361, filesize=9.6 K 2024-12-10T14:27:49,531 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for d2de72cdb20a8dd601845b8e001f941b in 1237ms, sequenceid=361, compaction requested=false 2024-12-10T14:27:49,531 DEBUG [StoreCloser-TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/81429b6c13a64ac5ba870ffde14384ac, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/da485c7f31614619a535a6a39a629762, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/cac19e98b4ae4b84a3cc1396e93666f0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/6c60c3ef90fe46dda39c4734d4fe8130, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/cfa02e1d3f224afdb3fe6dcac7d786f0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/b5de61e68ff34e5c9ed9463a3d5e1b46, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/5b2d9f8482d94aa4b84ff5e1efc9b2f8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/1868f71fb2aa48b4b8bce5e838637926, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a1625115f79c49f0b559f2df4cbf9508, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/130f6430b78944f99d67154dc34ddea3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/9ef3354185714fe692b56fdf10a5530c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/7920d1740a14471e8c59cec93b80370a, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/599bca0ce79a41e4b9c54c2e406c01db, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/6a9570973b8e4e52a8b9f5db7b858dee, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/b86a60130e4f4e1a827814ae858539db, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a34994738f454dd08953eb19a801ac39, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/b043dde992774cfb8b47642dd4c0129d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a05aa5fa9cdc420c9885c371508a159b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/09caf8c8bb56444685de64df11647a38, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/560f3934cee44f548c1048ac1c9b3d13, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/39c5c863e72c4c26a5b6e5d8d379db0f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/d92ffe2ab3874907b826f489648f0ac6, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/0a69231441c34d79ac440f4e0b5ca5e4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a324a06a80ca43fe9c54912b409ebe7e] to archive 2024-12-10T14:27:49,532 DEBUG [StoreCloser-TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
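When the store closes, HFileArchiver moves the compacted-away HFiles into the cluster's archive directory instead of deleting them outright, as the "Archived from FileableStoreFile" lines that follow show. A small sketch for listing those archived files with the Hadoop FileSystem API is below; the path mirrors the one in the log, so substitute your own filesystem, table and region.

```java
// Illustrative only: inspect the archive directory HFileArchiver moves store files into.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedHFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path archive = new Path("hdfs://localhost:38801/user/jenkins/test-data/"
        + "0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/"
        + "TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A");
    FileSystem fs = archive.getFileSystem(conf);
    for (FileStatus status : fs.listStatus(archive)) {
      // Each entry is an HFile that a compaction replaced and that was kept for safety.
      System.out.println(status.getPath().getName() + " (" + status.getLen() + " bytes)");
    }
  }
}
```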
2024-12-10T14:27:49,534 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/cac19e98b4ae4b84a3cc1396e93666f0 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/cac19e98b4ae4b84a3cc1396e93666f0 2024-12-10T14:27:49,534 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/81429b6c13a64ac5ba870ffde14384ac to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/81429b6c13a64ac5ba870ffde14384ac 2024-12-10T14:27:49,534 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/6c60c3ef90fe46dda39c4734d4fe8130 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/6c60c3ef90fe46dda39c4734d4fe8130 2024-12-10T14:27:49,535 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/da485c7f31614619a535a6a39a629762 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/da485c7f31614619a535a6a39a629762 2024-12-10T14:27:49,535 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/cfa02e1d3f224afdb3fe6dcac7d786f0 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/cfa02e1d3f224afdb3fe6dcac7d786f0 2024-12-10T14:27:49,535 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/b5de61e68ff34e5c9ed9463a3d5e1b46 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/b5de61e68ff34e5c9ed9463a3d5e1b46 2024-12-10T14:27:49,535 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/1868f71fb2aa48b4b8bce5e838637926 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/1868f71fb2aa48b4b8bce5e838637926 2024-12-10T14:27:49,535 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/5b2d9f8482d94aa4b84ff5e1efc9b2f8 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/5b2d9f8482d94aa4b84ff5e1efc9b2f8 2024-12-10T14:27:49,536 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a1625115f79c49f0b559f2df4cbf9508 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a1625115f79c49f0b559f2df4cbf9508 2024-12-10T14:27:49,536 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/7920d1740a14471e8c59cec93b80370a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/7920d1740a14471e8c59cec93b80370a 2024-12-10T14:27:49,536 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/130f6430b78944f99d67154dc34ddea3 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/130f6430b78944f99d67154dc34ddea3 2024-12-10T14:27:49,537 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/599bca0ce79a41e4b9c54c2e406c01db to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/599bca0ce79a41e4b9c54c2e406c01db 2024-12-10T14:27:49,537 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/9ef3354185714fe692b56fdf10a5530c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/9ef3354185714fe692b56fdf10a5530c 2024-12-10T14:27:49,537 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/b86a60130e4f4e1a827814ae858539db to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/b86a60130e4f4e1a827814ae858539db 2024-12-10T14:27:49,538 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/6a9570973b8e4e52a8b9f5db7b858dee to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/6a9570973b8e4e52a8b9f5db7b858dee 2024-12-10T14:27:49,538 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a34994738f454dd08953eb19a801ac39 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a34994738f454dd08953eb19a801ac39 2024-12-10T14:27:49,538 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/b043dde992774cfb8b47642dd4c0129d to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/b043dde992774cfb8b47642dd4c0129d 2024-12-10T14:27:49,538 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a05aa5fa9cdc420c9885c371508a159b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a05aa5fa9cdc420c9885c371508a159b 2024-12-10T14:27:49,538 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/09caf8c8bb56444685de64df11647a38 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/09caf8c8bb56444685de64df11647a38 2024-12-10T14:27:49,538 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/39c5c863e72c4c26a5b6e5d8d379db0f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/39c5c863e72c4c26a5b6e5d8d379db0f 2024-12-10T14:27:49,538 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/d92ffe2ab3874907b826f489648f0ac6 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/d92ffe2ab3874907b826f489648f0ac6 2024-12-10T14:27:49,538 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/560f3934cee44f548c1048ac1c9b3d13 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/560f3934cee44f548c1048ac1c9b3d13 2024-12-10T14:27:49,539 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/0a69231441c34d79ac440f4e0b5ca5e4 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/0a69231441c34d79ac440f4e0b5ca5e4 2024-12-10T14:27:49,539 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a324a06a80ca43fe9c54912b409ebe7e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a324a06a80ca43fe9c54912b409ebe7e 2024-12-10T14:27:49,540 DEBUG [StoreCloser-TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/2afc1c045f854314b324b2280e8f4e23, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/1688992b2bce403ebc6fc616866e114f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/57c5b07a81fd485eaf9b35d88af2191f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/57a0883eca5c4ea683736cfe3c000713, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/6400d11e3fa74cf7ae892c3189a20ad6, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c6319c9981074936a2c74bf22d901d8a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c6cb09cf0ffe4408b420394f9cd4be00, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/67a2f59be0dd4fb0897bef888250eced, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/b56711b7517042de9037a03d242ade61, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/a5a16851f7ce47bd8bf670dae33d9318, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/4272f0caddf44f9488716982ae81ee5c, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/e7466b1264aa44979fbc782c4d54499c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/44c0595877ce4b70a61b3df7161fc747, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/fede36cd60d74c6f91fedd65441327d8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/5bd077a30c594ad08ad03abd21df6c38, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/8ea40c2bf17445bc8b76f5e291579e8c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/34f9134fbdde428cad873179675c6411, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/92eddecfaa3a4a62826864c0e81bbb97, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c5b2ff95e2254476b5c6d264998fc61a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/ca0b175e4bab402984cd203a7909b86f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/fa42a17cdfdc489c92213405806a347a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/bc589bf3802841b49134645c97904afe, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/7317998134a7440faf239bde15ffba33, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/f80bfba7589a4296a9713e03f793c92c] to archive 2024-12-10T14:27:49,541 DEBUG [StoreCloser-TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T14:27:49,542 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/2afc1c045f854314b324b2280e8f4e23 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/2afc1c045f854314b324b2280e8f4e23 2024-12-10T14:27:49,542 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/1688992b2bce403ebc6fc616866e114f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/1688992b2bce403ebc6fc616866e114f 2024-12-10T14:27:49,542 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/57c5b07a81fd485eaf9b35d88af2191f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/57c5b07a81fd485eaf9b35d88af2191f 2024-12-10T14:27:49,542 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c6319c9981074936a2c74bf22d901d8a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c6319c9981074936a2c74bf22d901d8a 2024-12-10T14:27:49,543 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/57a0883eca5c4ea683736cfe3c000713 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/57a0883eca5c4ea683736cfe3c000713 2024-12-10T14:27:49,543 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c6cb09cf0ffe4408b420394f9cd4be00 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c6cb09cf0ffe4408b420394f9cd4be00 2024-12-10T14:27:49,543 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/6400d11e3fa74cf7ae892c3189a20ad6 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/6400d11e3fa74cf7ae892c3189a20ad6 2024-12-10T14:27:49,543 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/67a2f59be0dd4fb0897bef888250eced to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/67a2f59be0dd4fb0897bef888250eced 2024-12-10T14:27:49,544 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/b56711b7517042de9037a03d242ade61 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/b56711b7517042de9037a03d242ade61 2024-12-10T14:27:49,544 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/4272f0caddf44f9488716982ae81ee5c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/4272f0caddf44f9488716982ae81ee5c 2024-12-10T14:27:49,544 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/44c0595877ce4b70a61b3df7161fc747 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/44c0595877ce4b70a61b3df7161fc747 2024-12-10T14:27:49,544 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/a5a16851f7ce47bd8bf670dae33d9318 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/a5a16851f7ce47bd8bf670dae33d9318 2024-12-10T14:27:49,544 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/5bd077a30c594ad08ad03abd21df6c38 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/5bd077a30c594ad08ad03abd21df6c38 2024-12-10T14:27:49,544 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/fede36cd60d74c6f91fedd65441327d8 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/fede36cd60d74c6f91fedd65441327d8 2024-12-10T14:27:49,544 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/e7466b1264aa44979fbc782c4d54499c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/e7466b1264aa44979fbc782c4d54499c 2024-12-10T14:27:49,545 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/8ea40c2bf17445bc8b76f5e291579e8c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/8ea40c2bf17445bc8b76f5e291579e8c 2024-12-10T14:27:49,545 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/92eddecfaa3a4a62826864c0e81bbb97 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/92eddecfaa3a4a62826864c0e81bbb97 2024-12-10T14:27:49,545 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/34f9134fbdde428cad873179675c6411 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/34f9134fbdde428cad873179675c6411 2024-12-10T14:27:49,546 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/bc589bf3802841b49134645c97904afe to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/bc589bf3802841b49134645c97904afe 2024-12-10T14:27:49,546 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c5b2ff95e2254476b5c6d264998fc61a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/c5b2ff95e2254476b5c6d264998fc61a 2024-12-10T14:27:49,546 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/ca0b175e4bab402984cd203a7909b86f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/ca0b175e4bab402984cd203a7909b86f 2024-12-10T14:27:49,546 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/7317998134a7440faf239bde15ffba33 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/7317998134a7440faf239bde15ffba33 2024-12-10T14:27:49,546 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/fa42a17cdfdc489c92213405806a347a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/fa42a17cdfdc489c92213405806a347a 2024-12-10T14:27:49,546 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/f80bfba7589a4296a9713e03f793c92c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/f80bfba7589a4296a9713e03f793c92c 2024-12-10T14:27:49,547 DEBUG [StoreCloser-TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/b6d9651cc3f14884887e252429cbb014, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/26e9c1b60cd8424a85ff2223f72382f2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/ca188d4ca1784193b86e26de27c29bad, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/35f8924a06464ae288ec6361ca84a5f5, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/369e9de332a640c7803c9bc9ec31b255, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/6818f378f16b4a2c9805e1488ea06647, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/b8339c22770346adb72dd71650df8a77, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/05a46b7c30d34fa1b72a140261578b03, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/d77c0ba1f66e4cbca185933598cecdf0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/743618e951d34d248bde437dd64ce2b2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/87b51d053c2e44c3b307b0bec421293e, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/2d763ab7372146edab2ddda4dfee3ab7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/ca70733a8f0d48c3a90e177f7b22292a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/b12231ed1d834f648f065ab6dc392374, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/4a13a1edae824250a5cee897a2f8e767, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/94aa1adbfb0e474fbd84ee97b2481a83, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/8a717162bd8c4fa8955a18fb9ae06b8e, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/6ecc9681580d477b8a2fc6e6f6b0dbe0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/f4ccb12387ce405cbdb1e0da5695d312, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/a075be228fc740018c37747b1d489eb4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/4ce0892861e145fbb01f54f8eeb4fdf8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/3c7f9fab372e47679c394445a1c1602c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/f5194c2243e14fc68f4646791967cc1b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/121327236d9c485bbf63845e321d7312] to archive 2024-12-10T14:27:49,548 DEBUG [StoreCloser-TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T14:27:49,549 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/26e9c1b60cd8424a85ff2223f72382f2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/26e9c1b60cd8424a85ff2223f72382f2 2024-12-10T14:27:49,549 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/b6d9651cc3f14884887e252429cbb014 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/b6d9651cc3f14884887e252429cbb014 2024-12-10T14:27:49,549 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/35f8924a06464ae288ec6361ca84a5f5 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/35f8924a06464ae288ec6361ca84a5f5 2024-12-10T14:27:49,549 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/369e9de332a640c7803c9bc9ec31b255 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/369e9de332a640c7803c9bc9ec31b255 2024-12-10T14:27:49,549 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/6818f378f16b4a2c9805e1488ea06647 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/6818f378f16b4a2c9805e1488ea06647 2024-12-10T14:27:49,549 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/ca188d4ca1784193b86e26de27c29bad to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/ca188d4ca1784193b86e26de27c29bad 2024-12-10T14:27:49,550 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/05a46b7c30d34fa1b72a140261578b03 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/05a46b7c30d34fa1b72a140261578b03 2024-12-10T14:27:49,550 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/b8339c22770346adb72dd71650df8a77 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/b8339c22770346adb72dd71650df8a77 2024-12-10T14:27:49,550 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/743618e951d34d248bde437dd64ce2b2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/743618e951d34d248bde437dd64ce2b2 2024-12-10T14:27:49,551 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/d77c0ba1f66e4cbca185933598cecdf0 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/d77c0ba1f66e4cbca185933598cecdf0 2024-12-10T14:27:49,551 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/b12231ed1d834f648f065ab6dc392374 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/b12231ed1d834f648f065ab6dc392374 2024-12-10T14:27:49,551 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/87b51d053c2e44c3b307b0bec421293e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/87b51d053c2e44c3b307b0bec421293e 2024-12-10T14:27:49,551 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/ca70733a8f0d48c3a90e177f7b22292a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/ca70733a8f0d48c3a90e177f7b22292a 2024-12-10T14:27:49,552 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/4a13a1edae824250a5cee897a2f8e767 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/4a13a1edae824250a5cee897a2f8e767 2024-12-10T14:27:49,552 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/94aa1adbfb0e474fbd84ee97b2481a83 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/94aa1adbfb0e474fbd84ee97b2481a83 2024-12-10T14:27:49,552 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/2d763ab7372146edab2ddda4dfee3ab7 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/2d763ab7372146edab2ddda4dfee3ab7 2024-12-10T14:27:49,552 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/8a717162bd8c4fa8955a18fb9ae06b8e to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/8a717162bd8c4fa8955a18fb9ae06b8e 2024-12-10T14:27:49,552 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/f4ccb12387ce405cbdb1e0da5695d312 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/f4ccb12387ce405cbdb1e0da5695d312 2024-12-10T14:27:49,552 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/6ecc9681580d477b8a2fc6e6f6b0dbe0 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/6ecc9681580d477b8a2fc6e6f6b0dbe0 2024-12-10T14:27:49,553 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/3c7f9fab372e47679c394445a1c1602c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/3c7f9fab372e47679c394445a1c1602c 2024-12-10T14:27:49,553 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/a075be228fc740018c37747b1d489eb4 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/a075be228fc740018c37747b1d489eb4 2024-12-10T14:27:49,553 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/f5194c2243e14fc68f4646791967cc1b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/f5194c2243e14fc68f4646791967cc1b 2024-12-10T14:27:49,553 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/121327236d9c485bbf63845e321d7312 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/121327236d9c485bbf63845e321d7312 2024-12-10T14:27:49,553 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/4ce0892861e145fbb01f54f8eeb4fdf8 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/4ce0892861e145fbb01f54f8eeb4fdf8 2024-12-10T14:27:49,557 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/recovered.edits/364.seqid, newMaxSeqId=364, maxSeqId=1 2024-12-10T14:27:49,557 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b. 
2024-12-10T14:27:49,557 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1635): Region close journal for d2de72cdb20a8dd601845b8e001f941b: 2024-12-10T14:27:49,558 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(170): Closed d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:49,559 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=d2de72cdb20a8dd601845b8e001f941b, regionState=CLOSED 2024-12-10T14:27:49,560 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-10T14:27:49,560 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; CloseRegionProcedure d2de72cdb20a8dd601845b8e001f941b, server=db1d50717577,46699,1733840717757 in 1.4170 sec 2024-12-10T14:27:49,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-12-10T14:27:49,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d2de72cdb20a8dd601845b8e001f941b, UNASSIGN in 1.4200 sec 2024-12-10T14:27:49,563 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-10T14:27:49,563 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4230 sec 2024-12-10T14:27:49,563 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840869563"}]},"ts":"1733840869563"} 2024-12-10T14:27:49,564 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T14:27:49,566 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-10T14:27:49,567 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4340 sec 2024-12-10T14:27:50,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-10T14:27:50,238 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-10T14:27:50,239 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T14:27:50,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:50,240 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=144, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:50,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-10T14:27:50,240 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=144, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:50,241 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:50,243 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/recovered.edits] 2024-12-10T14:27:50,245 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a7f80c95e115489bb8100eb85d47f3a9 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/a7f80c95e115489bb8100eb85d47f3a9 2024-12-10T14:27:50,245 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/2aac2abedb3043c89b60b729eabedd06 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/A/2aac2abedb3043c89b60b729eabedd06 2024-12-10T14:27:50,247 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/80f08b0da69e4418a32f7077e34a9eae to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/80f08b0da69e4418a32f7077e34a9eae 2024-12-10T14:27:50,247 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/8416c23c2bd748ac9c902ad049243937 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/B/8416c23c2bd748ac9c902ad049243937 2024-12-10T14:27:50,249 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/30d3412c9af9494a9fca2a0911cce39c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/30d3412c9af9494a9fca2a0911cce39c 
2024-12-10T14:27:50,249 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/4b8cf9052b874aceb3b4a7d509e1541c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/C/4b8cf9052b874aceb3b4a7d509e1541c 2024-12-10T14:27:50,251 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/recovered.edits/364.seqid to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b/recovered.edits/364.seqid 2024-12-10T14:27:50,252 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/d2de72cdb20a8dd601845b8e001f941b 2024-12-10T14:27:50,252 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T14:27:50,254 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=144, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:50,255 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T14:27:50,256 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-10T14:27:50,257 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=144, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:50,257 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-10T14:27:50,257 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733840870257"}]},"ts":"9223372036854775807"} 2024-12-10T14:27:50,258 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T14:27:50,258 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d2de72cdb20a8dd601845b8e001f941b, NAME => 'TestAcidGuarantees,,1733840838894.d2de72cdb20a8dd601845b8e001f941b.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T14:27:50,258 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
2024-12-10T14:27:50,259 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733840870258"}]},"ts":"9223372036854775807"} 2024-12-10T14:27:50,260 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T14:27:50,262 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=144, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:50,262 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 23 msec 2024-12-10T14:27:50,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-10T14:27:50,341 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-12-10T14:27:50,350 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=244 (was 244), OpenFileDescriptor=450 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=303 (was 284) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2277 (was 2294) 2024-12-10T14:27:50,358 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=244, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=303, ProcessCount=11, AvailableMemoryMB=2276 2024-12-10T14:27:50,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-10T14:27:50,360 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:27:50,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=145, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:50,361 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T14:27:50,361 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:50,361 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 145 2024-12-10T14:27:50,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-10T14:27:50,362 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T14:27:50,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742324_1500 (size=963) 2024-12-10T14:27:50,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-10T14:27:50,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-10T14:27:50,768 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da 2024-12-10T14:27:50,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742325_1501 (size=53) 2024-12-10T14:27:50,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-10T14:27:51,173 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:27:51,173 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing a09038ab689acaef0b961036bc4b4bd2, disabling compactions & flushes 2024-12-10T14:27:51,173 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:51,173 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:51,173 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. after waiting 0 ms 2024-12-10T14:27:51,174 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:51,174 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
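The create request logged above builds TestAcidGuarantees with three column families (A, B, C, one version each) and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', which is why every store opened below gets a CompactingMemStore with the ADAPTIVE compactor. A hedged sketch of an equivalent Admin call (illustrative names; the test uses its own helper classes):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder tdb = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Matches the TABLE_ATTRIBUTES METADATA shown in the create log:
              // each store is backed by a CompactingMemStore with the ADAPTIVE policy.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
          for (String family : new String[] { "A", "B", "C" }) {
            tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)   // VERSIONS => '1' in the descriptor above
                .build());
          }
          // createTable blocks until CreateTableProcedure (pid=145 above) completes.
          admin.createTable(tdb.build());
        }
      }
    }

The repeated "Checking to see if procedure is done pid=145" lines are the client polling the master while a synchronous create waits for that procedure to finish.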
2024-12-10T14:27:51,174 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:27:51,174 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T14:27:51,175 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733840871174"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733840871174"}]},"ts":"1733840871174"} 2024-12-10T14:27:51,176 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T14:27:51,176 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T14:27:51,176 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840871176"}]},"ts":"1733840871176"} 2024-12-10T14:27:51,177 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T14:27:51,182 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a09038ab689acaef0b961036bc4b4bd2, ASSIGN}] 2024-12-10T14:27:51,183 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a09038ab689acaef0b961036bc4b4bd2, ASSIGN 2024-12-10T14:27:51,183 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a09038ab689acaef0b961036bc4b4bd2, ASSIGN; state=OFFLINE, location=db1d50717577,46699,1733840717757; forceNewPlan=false, retain=false 2024-12-10T14:27:51,334 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=a09038ab689acaef0b961036bc4b4bd2, regionState=OPENING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:27:51,335 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; OpenRegionProcedure a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:27:51,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-10T14:27:51,486 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:51,489 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
2024-12-10T14:27:51,489 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7285): Opening region: {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:27:51,489 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:51,489 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:27:51,489 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7327): checking encryption for a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:51,489 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7330): checking classloading for a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:51,490 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:51,491 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:27:51,491 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a09038ab689acaef0b961036bc4b4bd2 columnFamilyName A 2024-12-10T14:27:51,491 DEBUG [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:51,492 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.HStore(327): Store=a09038ab689acaef0b961036bc4b4bd2/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:27:51,492 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:51,493 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:27:51,493 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a09038ab689acaef0b961036bc4b4bd2 columnFamilyName B 2024-12-10T14:27:51,493 DEBUG [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:51,493 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.HStore(327): Store=a09038ab689acaef0b961036bc4b4bd2/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:27:51,493 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:51,494 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:27:51,494 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a09038ab689acaef0b961036bc4b4bd2 columnFamilyName C 2024-12-10T14:27:51,494 DEBUG [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:51,494 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.HStore(327): Store=a09038ab689acaef0b961036bc4b4bd2/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:27:51,495 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:51,495 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:51,495 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:51,496 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T14:27:51,497 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1085): writing seq id for a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:51,499 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T14:27:51,499 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1102): Opened a09038ab689acaef0b961036bc4b4bd2; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60175317, jitterRate=-0.10331790149211884}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T14:27:51,500 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1001): Region open journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:27:51,500 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., pid=147, masterSystemTime=1733840871486 2024-12-10T14:27:51,501 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:51,501 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
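Once OpenRegionProcedure (pid=147) reports the region open, the table is usable from a client. One way to confirm from client code what these entries report, namely that the descriptor carries the ADAPTIVE attribute and that the single region (empty start and end keys) is assigned to the server chosen above; this is an illustrative sketch, not part of the test:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class VerifyTableOnline {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(tn)) {
          // The compacting-memstore attribute round-trips through the stored descriptor.
          System.out.println("memstore type = "
              + admin.getDescriptor(tn).getValue("hbase.hregion.compacting.memstore.type"));
          // Expect one region with empty start/end keys, hosted where the
          // OpenRegionProcedure above assigned it.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getRegionNameAsString()
                + " on " + loc.getServerName());
          }
        }
      }
    }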
2024-12-10T14:27:51,501 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=a09038ab689acaef0b961036bc4b4bd2, regionState=OPEN, openSeqNum=2, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:27:51,503 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-10T14:27:51,503 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; OpenRegionProcedure a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 in 167 msec 2024-12-10T14:27:51,504 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-12-10T14:27:51,504 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a09038ab689acaef0b961036bc4b4bd2, ASSIGN in 321 msec 2024-12-10T14:27:51,505 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T14:27:51,505 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840871505"}]},"ts":"1733840871505"} 2024-12-10T14:27:51,505 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T14:27:51,507 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T14:27:51,508 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1470 sec 2024-12-10T14:27:52,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-10T14:27:52,465 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 145 completed 2024-12-10T14:27:52,466 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11030ef5 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1584f18a 2024-12-10T14:27:52,475 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d7fe431, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:52,476 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:52,477 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41740, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:52,478 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T14:27:52,479 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50578, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T14:27:52,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-10T14:27:52,480 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T14:27:52,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-10T14:27:52,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742326_1502 (size=999) 2024-12-10T14:27:52,889 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-10T14:27:52,889 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-10T14:27:52,890 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T14:27:52,892 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a09038ab689acaef0b961036bc4b4bd2, REOPEN/MOVE}] 2024-12-10T14:27:52,892 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a09038ab689acaef0b961036bc4b4bd2, REOPEN/MOVE 2024-12-10T14:27:52,893 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=a09038ab689acaef0b961036bc4b4bd2, regionState=CLOSING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:27:52,893 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:27:52,894 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; CloseRegionProcedure a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:27:53,045 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:53,045 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(124): Close a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:53,045 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:27:53,045 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1681): Closing a09038ab689acaef0b961036bc4b4bd2, disabling compactions & flushes 2024-12-10T14:27:53,045 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:53,045 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:53,045 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. after waiting 0 ms 2024-12-10T14:27:53,045 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
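The modify request above (pid=148) changes only column family A: IS_MOB => 'true' with MOB_THRESHOLD => '4', so cell values larger than 4 bytes are written as MOBs. Because the descriptor changed, ModifyTableProcedure rewrites .tableinfo (the 963-byte file is replaced by the 999-byte one) and spawns ReopenTableRegionsProcedure, which is what the region close/reopen entries around this point show. A hedged sketch of an equivalent client call (illustrative only; the test drives this through its own utilities):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor current = admin.getDescriptor(tn);
          TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
              .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                  .setMobEnabled(true)     // IS_MOB => 'true' in the new descriptor
                  .setMobThreshold(4L)     // MOB_THRESHOLD => '4' (bytes)
                  .build())
              .build();
          // modifyTable drives ModifyTableProcedure (pid=148 above), which updates
          // .tableinfo and reopens the region via ReopenTableRegionsProcedure.
          admin.modifyTable(modified);
        }
      }
    }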
2024-12-10T14:27:53,048 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-10T14:27:53,049 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:53,049 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1635): Region close journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:27:53,049 WARN [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionServer(3786): Not adding moved region record: a09038ab689acaef0b961036bc4b4bd2 to self. 2024-12-10T14:27:53,050 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(170): Closed a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:53,050 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=a09038ab689acaef0b961036bc4b4bd2, regionState=CLOSED 2024-12-10T14:27:53,052 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-12-10T14:27:53,052 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; CloseRegionProcedure a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 in 157 msec 2024-12-10T14:27:53,052 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a09038ab689acaef0b961036bc4b4bd2, REOPEN/MOVE; state=CLOSED, location=db1d50717577,46699,1733840717757; forceNewPlan=false, retain=true 2024-12-10T14:27:53,202 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=a09038ab689acaef0b961036bc4b4bd2, regionState=OPENING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,203 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=150, state=RUNNABLE; OpenRegionProcedure a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:27:53,355 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:53,357 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
2024-12-10T14:27:53,357 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7285): Opening region: {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} 2024-12-10T14:27:53,358 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:53,358 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T14:27:53,358 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7327): checking encryption for a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:53,358 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7330): checking classloading for a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:53,359 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:53,360 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:27:53,360 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a09038ab689acaef0b961036bc4b4bd2 columnFamilyName A 2024-12-10T14:27:53,366 DEBUG [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:53,366 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.HStore(327): Store=a09038ab689acaef0b961036bc4b4bd2/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:27:53,366 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:53,367 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:27:53,367 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a09038ab689acaef0b961036bc4b4bd2 columnFamilyName B 2024-12-10T14:27:53,367 DEBUG [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:53,367 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.HStore(327): Store=a09038ab689acaef0b961036bc4b4bd2/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:27:53,368 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:53,368 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T14:27:53,368 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a09038ab689acaef0b961036bc4b4bd2 columnFamilyName C 2024-12-10T14:27:53,368 DEBUG [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:53,368 INFO [StoreOpener-a09038ab689acaef0b961036bc4b4bd2-1 {}] regionserver.HStore(327): Store=a09038ab689acaef0b961036bc4b4bd2/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T14:27:53,369 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:53,369 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:53,370 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:53,371 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T14:27:53,372 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1085): writing seq id for a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:53,372 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1102): Opened a09038ab689acaef0b961036bc4b4bd2; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67690044, jitterRate=0.00866025686264038}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T14:27:53,373 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1001): Region open journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:27:53,373 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., pid=152, masterSystemTime=1733840873355 2024-12-10T14:27:53,374 DEBUG [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:53,374 INFO [RS_OPEN_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
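Just below, the reopen finishes (pids 150, 149 and 148 all complete), the test opens a batch of ZooKeeper and RPC client connections, and then asks the master to flush the table (FlushTableProcedure, pid=153) while concurrent writers immediately hit the region's blocking memstore limit ("Over memstore limit=512.0 K") and receive RegionTooBusyException. A hedged, illustrative sketch of the corresponding client-side calls with a simple bounded backoff on the busy exception; this is not the test's actual writer code, and with the default client retry policy the exception may instead surface wrapped in a RetriesExhaustedException:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushAndRetryPut {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(tn)) {
          // Equivalent of the "flush TestAcidGuarantees" request handled by
          // FlushTableProcedure (pid=153) below.
          admin.flush(tn);

          // Hypothetical row/qualifier/value, for illustration only.
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col0"), Bytes.toBytes("value"));
          // Writers that outrun the flush see RegionTooBusyException once the
          // blocking memstore limit (512 KB in this test) is exceeded; backing
          // off and retrying is the usual response.
          for (int attempt = 0; ; attempt++) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 10) throw e;
              Thread.sleep(100L << Math.min(attempt, 5));  // bounded exponential backoff
            }
          }
        }
      }
    }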
2024-12-10T14:27:53,375 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=a09038ab689acaef0b961036bc4b4bd2, regionState=OPEN, openSeqNum=5, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,377 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=150 2024-12-10T14:27:53,377 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=150, state=SUCCESS; OpenRegionProcedure a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 in 172 msec 2024-12-10T14:27:53,378 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-12-10T14:27:53,378 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a09038ab689acaef0b961036bc4b4bd2, REOPEN/MOVE in 485 msec 2024-12-10T14:27:53,379 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-12-10T14:27:53,379 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 488 msec 2024-12-10T14:27:53,380 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 899 msec 2024-12-10T14:27:53,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-10T14:27:53,381 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x69abefea to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b914bf4 2024-12-10T14:27:53,384 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@91d72db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:53,385 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e757135 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f6a59e4 2024-12-10T14:27:53,388 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d836f78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:53,388 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7846cb78 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@150e08ed 2024-12-10T14:27:53,392 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53305d9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:53,393 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f1754bc 
to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a3b66d3 2024-12-10T14:27:53,395 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bb6288a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:53,396 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d9113f3 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5cfdf76c 2024-12-10T14:27:53,401 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6556601, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:53,402 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68c2838a to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@458a85fd 2024-12-10T14:27:53,404 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d832d43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:53,405 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x50bf224f to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@410bf0c8 2024-12-10T14:27:53,408 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15b6349f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:53,409 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79be903c to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@67adb273 2024-12-10T14:27:53,412 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@439b60d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:53,412 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d79f1c0 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@474dec36 2024-12-10T14:27:53,415 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f48b1c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:53,415 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x40dfd554 to 127.0.0.1:58494 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68dbad25 2024-12-10T14:27:53,418 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7287c75d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T14:27:53,422 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:27:53,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees 2024-12-10T14:27:53,424 DEBUG [hconnection-0x67e6d2ee-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:53,424 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:27:53,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-10T14:27:53,424 DEBUG [hconnection-0x7203f17e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:53,424 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:27:53,424 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:27:53,424 DEBUG [hconnection-0x1bdfcb62-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:53,425 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41752, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:53,425 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41750, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:53,425 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41760, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:53,429 DEBUG [hconnection-0x1ffe05cd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:53,429 DEBUG [hconnection-0x6f5b1f2c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:53,429 DEBUG [hconnection-0x1e354648-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-10T14:27:53,429 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41774, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:53,430 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41776, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:53,430 DEBUG [hconnection-0x6a9efee3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:53,430 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41784, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:53,431 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41790, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:53,432 DEBUG [hconnection-0x20b256e6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:53,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:53,433 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T14:27:53,433 DEBUG [hconnection-0x7e93ad21-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:53,433 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41796, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:53,434 DEBUG [hconnection-0x2b8b5373-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T14:27:53,434 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41798, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:53,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A 2024-12-10T14:27:53,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:53,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B 2024-12-10T14:27:53,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:53,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C 2024-12-10T14:27:53,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:53,435 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T14:27:53,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:53,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840933445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:53,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840933446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:53,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840933447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:53,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840933447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:53,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840933448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,458 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121083feb94eae294094a7504e50c15bea43_a09038ab689acaef0b961036bc4b4bd2 is 50, key is test_row_0/A:col10/1733840873433/Put/seqid=0 2024-12-10T14:27:53,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742327_1503 (size=12154) 2024-12-10T14:27:53,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-10T14:27:53,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:53,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840933549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:53,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840933549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:53,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840933549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:53,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840933551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:53,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840933551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,576 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:53,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-10T14:27:53,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:53,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:27:53,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:53,577 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:53,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:53,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:53,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-10T14:27:53,729 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:53,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-10T14:27:53,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:53,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:27:53,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:53,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:53,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:53,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:53,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:53,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840933751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:53,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840933752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:53,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840933752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:53,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840933753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:53,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840933754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:53,862 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:53,866 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121083feb94eae294094a7504e50c15bea43_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121083feb94eae294094a7504e50c15bea43_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:53,866 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/bb9a00276564451aa08d1271cb5a31cb, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:27:53,867 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/bb9a00276564451aa08d1271cb5a31cb is 175, key is test_row_0/A:col10/1733840873433/Put/seqid=0 2024-12-10T14:27:53,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742328_1504 (size=30955) 2024-12-10T14:27:53,881 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
db1d50717577,46699,1733840717757 2024-12-10T14:27:53,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-10T14:27:53,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:53,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:27:53,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:53,882 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:53,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:53,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:54,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-10T14:27:54,034 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:54,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-10T14:27:54,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
2024-12-10T14:27:54,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:27:54,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:54,034 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:54,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:54,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:54,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:54,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840934054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:54,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:54,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840934054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:54,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:54,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840934055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:54,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:54,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840934055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:54,057 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:54,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840934055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:54,186 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:54,187 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-10T14:27:54,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:54,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:27:54,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:54,187 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:54,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:54,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:54,271 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=18, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/bb9a00276564451aa08d1271cb5a31cb 2024-12-10T14:27:54,291 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/0f18e3eae83545039b87bf0555a3fdb0 is 50, key is test_row_0/B:col10/1733840873433/Put/seqid=0 2024-12-10T14:27:54,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742329_1505 (size=12001) 2024-12-10T14:27:54,339 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:54,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-10T14:27:54,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:54,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
as already flushing 2024-12-10T14:27:54,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:54,339 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:54,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:54,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:54,491 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:54,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-10T14:27:54,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:54,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:27:54,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:54,492 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:54,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:54,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:54,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-10T14:27:54,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:54,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840934558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:54,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:54,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840934559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:54,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:54,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:54,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840934560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:54,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840934560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:54,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:54,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840934561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:54,644 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:54,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-10T14:27:54,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:54,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:27:54,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:54,644 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:54,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:54,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:54,696 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/0f18e3eae83545039b87bf0555a3fdb0 2024-12-10T14:27:54,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/fdb343bca55543578511d134b556ceb3 is 50, key is test_row_0/C:col10/1733840873433/Put/seqid=0 2024-12-10T14:27:54,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742330_1506 (size=12001) 2024-12-10T14:27:54,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/fdb343bca55543578511d134b556ceb3 2024-12-10T14:27:54,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/bb9a00276564451aa08d1271cb5a31cb as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/bb9a00276564451aa08d1271cb5a31cb 2024-12-10T14:27:54,732 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/bb9a00276564451aa08d1271cb5a31cb, entries=150, sequenceid=18, filesize=30.2 K 2024-12-10T14:27:54,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/0f18e3eae83545039b87bf0555a3fdb0 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/0f18e3eae83545039b87bf0555a3fdb0 2024-12-10T14:27:54,736 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/0f18e3eae83545039b87bf0555a3fdb0, entries=150, sequenceid=18, filesize=11.7 K 2024-12-10T14:27:54,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/fdb343bca55543578511d134b556ceb3 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/fdb343bca55543578511d134b556ceb3 2024-12-10T14:27:54,739 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/fdb343bca55543578511d134b556ceb3, entries=150, sequenceid=18, filesize=11.7 K 2024-12-10T14:27:54,740 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for a09038ab689acaef0b961036bc4b4bd2 in 1307ms, sequenceid=18, compaction requested=false 2024-12-10T14:27:54,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:27:54,796 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:54,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-10T14:27:54,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
2024-12-10T14:27:54,797 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T14:27:54,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A 2024-12-10T14:27:54,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:54,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B 2024-12-10T14:27:54,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:54,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C 2024-12-10T14:27:54,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:54,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210bbec03b9aee944ae8719633f3d80fdc2_a09038ab689acaef0b961036bc4b4bd2 is 50, key is test_row_0/A:col10/1733840873447/Put/seqid=0 2024-12-10T14:27:54,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742331_1507 (size=12154) 2024-12-10T14:27:55,079 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T14:27:55,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:55,214 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210bbec03b9aee944ae8719633f3d80fdc2_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210bbec03b9aee944ae8719633f3d80fdc2_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:55,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/ee72c5ea2b5b434085b3a4bc508fd480, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:27:55,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/ee72c5ea2b5b434085b3a4bc508fd480 is 175, key is test_row_0/A:col10/1733840873447/Put/seqid=0 2024-12-10T14:27:55,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742332_1508 (size=30955) 2024-12-10T14:27:55,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-10T14:27:55,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:55,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:27:55,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:55,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840935570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:55,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:55,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840935571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:55,574 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:55,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840935571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:55,574 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:55,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840935572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:55,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:55,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840935572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:55,619 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/ee72c5ea2b5b434085b3a4bc508fd480 2024-12-10T14:27:55,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/ab90d0f78fb24d959fb485ad040d6fb8 is 50, key is test_row_0/B:col10/1733840873447/Put/seqid=0 2024-12-10T14:27:55,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742333_1509 (size=12001) 2024-12-10T14:27:55,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:55,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840935673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:55,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:55,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840935674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:55,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:55,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840935675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:55,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:55,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840935675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:55,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:55,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840935675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:55,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:55,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840935876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:55,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:55,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840935876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:55,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:55,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840935877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:55,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:55,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840935877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:55,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:55,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840935877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,051 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/ab90d0f78fb24d959fb485ad040d6fb8 2024-12-10T14:27:56,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/63d014170c1b4c689d7046c270565518 is 50, key is test_row_0/C:col10/1733840873447/Put/seqid=0 2024-12-10T14:27:56,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742334_1510 (size=12001) 2024-12-10T14:27:56,061 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/63d014170c1b4c689d7046c270565518 2024-12-10T14:27:56,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/ee72c5ea2b5b434085b3a4bc508fd480 as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/ee72c5ea2b5b434085b3a4bc508fd480 2024-12-10T14:27:56,067 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/ee72c5ea2b5b434085b3a4bc508fd480, entries=150, sequenceid=41, filesize=30.2 K 2024-12-10T14:27:56,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/ab90d0f78fb24d959fb485ad040d6fb8 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/ab90d0f78fb24d959fb485ad040d6fb8 2024-12-10T14:27:56,071 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/ab90d0f78fb24d959fb485ad040d6fb8, entries=150, sequenceid=41, filesize=11.7 K 2024-12-10T14:27:56,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/63d014170c1b4c689d7046c270565518 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/63d014170c1b4c689d7046c270565518 2024-12-10T14:27:56,074 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/63d014170c1b4c689d7046c270565518, entries=150, sequenceid=41, filesize=11.7 K 2024-12-10T14:27:56,075 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for a09038ab689acaef0b961036bc4b4bd2 in 1278ms, sequenceid=41, compaction requested=false 2024-12-10T14:27:56,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:27:56,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
2024-12-10T14:27:56,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-12-10T14:27:56,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-12-10T14:27:56,078 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-12-10T14:27:56,078 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6520 sec 2024-12-10T14:27:56,079 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees in 2.6560 sec 2024-12-10T14:27:56,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:56,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-10T14:27:56,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A 2024-12-10T14:27:56,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:56,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B 2024-12-10T14:27:56,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:56,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C 2024-12-10T14:27:56,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:56,190 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210f2550246b97e4598ba0bf499f9e6f085_a09038ab689acaef0b961036bc4b4bd2 is 50, key is test_row_0/A:col10/1733840875571/Put/seqid=0 2024-12-10T14:27:56,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742335_1511 (size=12154) 2024-12-10T14:27:56,195 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:56,199 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210f2550246b97e4598ba0bf499f9e6f085_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f2550246b97e4598ba0bf499f9e6f085_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:56,200 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b4d637d0c00042e9995419aa5373eabf, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:27:56,200 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b4d637d0c00042e9995419aa5373eabf is 175, key is test_row_0/A:col10/1733840875571/Put/seqid=0 2024-12-10T14:27:56,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840936198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840936199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840936200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840936200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840936201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742336_1512 (size=30955) 2024-12-10T14:27:56,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840936303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840936304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840936304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840936304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840936309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840936506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840936507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840936507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840936508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840936512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,609 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=58, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b4d637d0c00042e9995419aa5373eabf 2024-12-10T14:27:56,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/352c8f54b2f9475aab0ff10ea6cfc671 is 50, key is test_row_0/B:col10/1733840875571/Put/seqid=0 2024-12-10T14:27:56,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742337_1513 (size=12001) 2024-12-10T14:27:56,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840936808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840936810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840936811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840936811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:56,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:56,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840936815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,020 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/352c8f54b2f9475aab0ff10ea6cfc671 2024-12-10T14:27:57,026 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/4fe77c5aa9224404bd1fd488f115497a is 50, key is test_row_0/C:col10/1733840875571/Put/seqid=0 2024-12-10T14:27:57,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742338_1514 (size=12001) 2024-12-10T14:27:57,030 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/4fe77c5aa9224404bd1fd488f115497a 2024-12-10T14:27:57,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b4d637d0c00042e9995419aa5373eabf as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b4d637d0c00042e9995419aa5373eabf 2024-12-10T14:27:57,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b4d637d0c00042e9995419aa5373eabf, entries=150, sequenceid=58, filesize=30.2 K 2024-12-10T14:27:57,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/352c8f54b2f9475aab0ff10ea6cfc671 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/352c8f54b2f9475aab0ff10ea6cfc671 2024-12-10T14:27:57,041 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/352c8f54b2f9475aab0ff10ea6cfc671, entries=150, sequenceid=58, filesize=11.7 K 2024-12-10T14:27:57,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/4fe77c5aa9224404bd1fd488f115497a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/4fe77c5aa9224404bd1fd488f115497a 2024-12-10T14:27:57,045 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/4fe77c5aa9224404bd1fd488f115497a, entries=150, sequenceid=58, filesize=11.7 K 2024-12-10T14:27:57,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for a09038ab689acaef0b961036bc4b4bd2 in 864ms, sequenceid=58, compaction requested=true 2024-12-10T14:27:57,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:27:57,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:27:57,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:57,046 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:57,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:27:57,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:57,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:27:57,046 DEBUG 
[RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:57,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:57,047 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:57,047 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/A is initiating minor compaction (all files) 2024-12-10T14:27:57,047 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/A in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:57,047 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/bb9a00276564451aa08d1271cb5a31cb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/ee72c5ea2b5b434085b3a4bc508fd480, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b4d637d0c00042e9995419aa5373eabf] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=90.7 K 2024-12-10T14:27:57,047 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:57,047 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/bb9a00276564451aa08d1271cb5a31cb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/ee72c5ea2b5b434085b3a4bc508fd480, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b4d637d0c00042e9995419aa5373eabf] 2024-12-10T14:27:57,047 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:57,047 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/B is initiating minor compaction (all files) 2024-12-10T14:27:57,048 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/B in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:57,048 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/0f18e3eae83545039b87bf0555a3fdb0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/ab90d0f78fb24d959fb485ad040d6fb8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/352c8f54b2f9475aab0ff10ea6cfc671] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=35.2 K 2024-12-10T14:27:57,048 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb9a00276564451aa08d1271cb5a31cb, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1733840873431 2024-12-10T14:27:57,048 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f18e3eae83545039b87bf0555a3fdb0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1733840873431 2024-12-10T14:27:57,048 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee72c5ea2b5b434085b3a4bc508fd480, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733840873445 2024-12-10T14:27:57,048 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting ab90d0f78fb24d959fb485ad040d6fb8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733840873445 2024-12-10T14:27:57,049 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4d637d0c00042e9995419aa5373eabf, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1733840875571 2024-12-10T14:27:57,049 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 
352c8f54b2f9475aab0ff10ea6cfc671, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1733840875571 2024-12-10T14:27:57,056 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#B#compaction#432 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:57,057 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/f0d7351584b3468dbae2d039233aea18 is 50, key is test_row_0/B:col10/1733840875571/Put/seqid=0 2024-12-10T14:27:57,058 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:27:57,073 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210f13dc208c09b4f5bb32f7fea08f6e1a0_a09038ab689acaef0b961036bc4b4bd2 store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:27:57,074 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210f13dc208c09b4f5bb32f7fea08f6e1a0_a09038ab689acaef0b961036bc4b4bd2, store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:27:57,074 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210f13dc208c09b4f5bb32f7fea08f6e1a0_a09038ab689acaef0b961036bc4b4bd2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:27:57,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742339_1515 (size=12104) 2024-12-10T14:27:57,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742340_1516 (size=4469) 2024-12-10T14:27:57,081 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#A#compaction#433 average throughput is 1.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:57,081 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/71bfceb9127f47ca8e0c4a1047543243 is 175, key is test_row_0/A:col10/1733840875571/Put/seqid=0 2024-12-10T14:27:57,082 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/f0d7351584b3468dbae2d039233aea18 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/f0d7351584b3468dbae2d039233aea18 2024-12-10T14:27:57,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742341_1517 (size=31058) 2024-12-10T14:27:57,087 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/B of a09038ab689acaef0b961036bc4b4bd2 into f0d7351584b3468dbae2d039233aea18(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:27:57,087 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:27:57,087 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/B, priority=13, startTime=1733840877046; duration=0sec 2024-12-10T14:27:57,087 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:27:57,087 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:B 2024-12-10T14:27:57,087 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:27:57,088 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:27:57,088 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/C is initiating minor compaction (all files) 2024-12-10T14:27:57,088 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/C in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
2024-12-10T14:27:57,088 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/fdb343bca55543578511d134b556ceb3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/63d014170c1b4c689d7046c270565518, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/4fe77c5aa9224404bd1fd488f115497a] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=35.2 K 2024-12-10T14:27:57,089 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting fdb343bca55543578511d134b556ceb3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1733840873431 2024-12-10T14:27:57,089 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 63d014170c1b4c689d7046c270565518, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733840873445 2024-12-10T14:27:57,089 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fe77c5aa9224404bd1fd488f115497a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1733840875571 2024-12-10T14:27:57,090 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/71bfceb9127f47ca8e0c4a1047543243 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/71bfceb9127f47ca8e0c4a1047543243 2024-12-10T14:27:57,096 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#C#compaction#434 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:27:57,097 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/187d8195b32c42bc80d08cc0f19b2e08 is 50, key is test_row_0/C:col10/1733840875571/Put/seqid=0 2024-12-10T14:27:57,101 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/A of a09038ab689acaef0b961036bc4b4bd2 into 71bfceb9127f47ca8e0c4a1047543243(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:27:57,101 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:27:57,101 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/A, priority=13, startTime=1733840877046; duration=0sec 2024-12-10T14:27:57,101 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:57,101 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:A 2024-12-10T14:27:57,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742342_1518 (size=12104) 2024-12-10T14:27:57,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:57,315 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-10T14:27:57,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A 2024-12-10T14:27:57,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:57,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B 2024-12-10T14:27:57,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:57,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C 2024-12-10T14:27:57,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:57,321 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412107458cf839cc3443aa88a09aa457887e0_a09038ab689acaef0b961036bc4b4bd2 is 50, key is test_row_0/A:col10/1733840877314/Put/seqid=0 2024-12-10T14:27:57,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742343_1519 (size=12154) 2024-12-10T14:27:57,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840937323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840937325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840937326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,329 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840937327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840937327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840937428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840937428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840937428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840937430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840937430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,508 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/187d8195b32c42bc80d08cc0f19b2e08 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/187d8195b32c42bc80d08cc0f19b2e08 2024-12-10T14:27:57,511 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/C of a09038ab689acaef0b961036bc4b4bd2 into 187d8195b32c42bc80d08cc0f19b2e08(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:27:57,511 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:27:57,511 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/C, priority=13, startTime=1733840877046; duration=0sec 2024-12-10T14:27:57,512 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:27:57,512 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:C 2024-12-10T14:27:57,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-10T14:27:57,528 INFO [Thread-2254 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 153 completed 2024-12-10T14:27:57,529 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:27:57,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees 2024-12-10T14:27:57,531 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:27:57,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T14:27:57,531 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:27:57,531 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:27:57,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840937630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840937630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T14:27:57,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840937631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840937632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840937637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,683 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:57,683 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-10T14:27:57,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:57,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:27:57,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:57,683 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:57,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:57,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:57,725 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:57,729 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412107458cf839cc3443aa88a09aa457887e0_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412107458cf839cc3443aa88a09aa457887e0_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:57,729 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b82efe5f5cf24d0e8fb8974b244034de, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:27:57,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b82efe5f5cf24d0e8fb8974b244034de is 175, key is test_row_0/A:col10/1733840877314/Put/seqid=0 2024-12-10T14:27:57,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742344_1520 (size=30955) 2024-12-10T14:27:57,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T14:27:57,835 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:57,835 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-10T14:27:57,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:57,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:27:57,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:57,836 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:57,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:27:57,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:57,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840937934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840937934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840937935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840937937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:57,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840937940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:57,988 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:57,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-10T14:27:57,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:57,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:27:57,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:57,989 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:57,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:57,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T14:27:58,134 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=81, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b82efe5f5cf24d0e8fb8974b244034de 2024-12-10T14:27:58,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/7f137a1ed808429397bf4c0f5bc576ff is 50, key is test_row_0/B:col10/1733840877314/Put/seqid=0 2024-12-10T14:27:58,140 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:58,141 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-10T14:27:58,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:58,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
as already flushing 2024-12-10T14:27:58,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:58,141 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742345_1521 (size=12001) 2024-12-10T14:27:58,293 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:58,294 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-10T14:27:58,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:58,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:27:58,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:58,294 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:58,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840938439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:58,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:58,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840938439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:58,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:58,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840938440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:58,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:58,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840938440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:58,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:58,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840938443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:58,446 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:58,446 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-10T14:27:58,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:58,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:27:58,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:58,446 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/7f137a1ed808429397bf4c0f5bc576ff 2024-12-10T14:27:58,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/8a67d5f169e742c086103ee492dd5c66 is 50, key is test_row_0/C:col10/1733840877314/Put/seqid=0 2024-12-10T14:27:58,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742346_1522 (size=12001) 2024-12-10T14:27:58,598 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:58,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-10T14:27:58,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:58,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
as already flushing 2024-12-10T14:27:58,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:58,599 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T14:27:58,751 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:58,752 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-10T14:27:58,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:58,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:27:58,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:58,752 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,904 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:58,904 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-10T14:27:58,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:58,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:27:58,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:58,905 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:27:58,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/8a67d5f169e742c086103ee492dd5c66 2024-12-10T14:27:58,962 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b82efe5f5cf24d0e8fb8974b244034de as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b82efe5f5cf24d0e8fb8974b244034de 2024-12-10T14:27:58,966 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b82efe5f5cf24d0e8fb8974b244034de, entries=150, sequenceid=81, filesize=30.2 K 2024-12-10T14:27:58,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/7f137a1ed808429397bf4c0f5bc576ff as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/7f137a1ed808429397bf4c0f5bc576ff 2024-12-10T14:27:58,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/7f137a1ed808429397bf4c0f5bc576ff, entries=150, sequenceid=81, 
filesize=11.7 K 2024-12-10T14:27:58,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/8a67d5f169e742c086103ee492dd5c66 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/8a67d5f169e742c086103ee492dd5c66 2024-12-10T14:27:58,973 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/8a67d5f169e742c086103ee492dd5c66, entries=150, sequenceid=81, filesize=11.7 K 2024-12-10T14:27:58,974 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for a09038ab689acaef0b961036bc4b4bd2 in 1659ms, sequenceid=81, compaction requested=false 2024-12-10T14:27:58,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:27:59,057 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:27:59,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-10T14:27:59,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
2024-12-10T14:27:59,057 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-10T14:27:59,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A 2024-12-10T14:27:59,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:59,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B 2024-12-10T14:27:59,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:59,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C 2024-12-10T14:27:59,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:27:59,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121004c8744446744847b25a25e174fe4d09_a09038ab689acaef0b961036bc4b4bd2 is 50, key is test_row_0/A:col10/1733840877326/Put/seqid=0 2024-12-10T14:27:59,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742347_1523 (size=12154) 2024-12-10T14:27:59,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:59,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:27:59,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:59,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840939455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:59,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:59,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840939455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:59,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:59,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840939457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:59,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:59,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840939458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:59,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:59,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840939458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:59,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:27:59,471 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121004c8744446744847b25a25e174fe4d09_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121004c8744446744847b25a25e174fe4d09_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:27:59,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/536de0d9d2a747a3b1182504021910a1, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:27:59,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/536de0d9d2a747a3b1182504021910a1 is 175, key is test_row_0/A:col10/1733840877326/Put/seqid=0 2024-12-10T14:27:59,476 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742348_1524 (size=30955) 2024-12-10T14:27:59,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:59,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:59,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840939559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:59,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840939559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:59,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:59,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840939561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:59,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:59,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:59,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840939561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:59,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840939561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:59,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T14:27:59,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:59,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840939762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:59,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:59,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840939763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:59,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:59,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840939764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:59,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:59,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840939764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:59,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:27:59,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840939765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:27:59,877 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=98, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/536de0d9d2a747a3b1182504021910a1 2024-12-10T14:27:59,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/8491e473a2ac47c1ab117e444537b332 is 50, key is test_row_0/B:col10/1733840877326/Put/seqid=0 2024-12-10T14:27:59,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742349_1525 (size=12001) 2024-12-10T14:27:59,887 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=98 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/8491e473a2ac47c1ab117e444537b332 2024-12-10T14:27:59,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/65b1ae12c3a24e14863c0827999d7a25 is 50, key is 
test_row_0/C:col10/1733840877326/Put/seqid=0 2024-12-10T14:27:59,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742350_1526 (size=12001) 2024-12-10T14:27:59,896 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=98 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/65b1ae12c3a24e14863c0827999d7a25 2024-12-10T14:27:59,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/536de0d9d2a747a3b1182504021910a1 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/536de0d9d2a747a3b1182504021910a1 2024-12-10T14:27:59,903 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/536de0d9d2a747a3b1182504021910a1, entries=150, sequenceid=98, filesize=30.2 K 2024-12-10T14:27:59,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/8491e473a2ac47c1ab117e444537b332 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/8491e473a2ac47c1ab117e444537b332 2024-12-10T14:27:59,907 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/8491e473a2ac47c1ab117e444537b332, entries=150, sequenceid=98, filesize=11.7 K 2024-12-10T14:27:59,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/65b1ae12c3a24e14863c0827999d7a25 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/65b1ae12c3a24e14863c0827999d7a25 2024-12-10T14:27:59,910 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/65b1ae12c3a24e14863c0827999d7a25, entries=150, sequenceid=98, filesize=11.7 K 2024-12-10T14:27:59,911 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 
{event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for a09038ab689acaef0b961036bc4b4bd2 in 854ms, sequenceid=98, compaction requested=true 2024-12-10T14:27:59,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2538): Flush status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:27:59,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:27:59,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-10T14:27:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=156 2024-12-10T14:27:59,914 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-10T14:27:59,914 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3810 sec 2024-12-10T14:27:59,915 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees in 2.3850 sec 2024-12-10T14:28:00,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:00,073 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T14:28:00,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A 2024-12-10T14:28:00,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:00,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B 2024-12-10T14:28:00,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:00,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C 2024-12-10T14:28:00,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:00,082 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210dd73882794874d80a2dc93f23a8f3102_a09038ab689acaef0b961036bc4b4bd2 is 50, key is test_row_0/A:col10/1733840880073/Put/seqid=0 2024-12-10T14:28:00,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840940078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840940078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840940079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840940085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840940085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742351_1527 (size=17034) 2024-12-10T14:28:00,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840940186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840940187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840940187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840940192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840940192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840940390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840940390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840940390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840940394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840940395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,498 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:00,501 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210dd73882794874d80a2dc93f23a8f3102_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210dd73882794874d80a2dc93f23a8f3102_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:00,502 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b9f049599d72489fa35576752b0d715c, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:00,503 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b9f049599d72489fa35576752b0d715c is 175, key is test_row_0/A:col10/1733840880073/Put/seqid=0 2024-12-10T14:28:00,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742352_1528 (size=48139) 2024-12-10T14:28:00,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840940692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840940693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840940693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840940696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:00,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840940699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:00,907 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=124, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b9f049599d72489fa35576752b0d715c 2024-12-10T14:28:00,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/a7ff227d438b4d898e4f49ea6d1409c2 is 50, key is test_row_0/B:col10/1733840880073/Put/seqid=0 2024-12-10T14:28:00,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742353_1529 (size=12001) 2024-12-10T14:28:01,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:01,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840941197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:01,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:01,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840941199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:01,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:01,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:01,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840941199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:01,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840941199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:01,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:01,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840941204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:01,335 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/a7ff227d438b4d898e4f49ea6d1409c2 2024-12-10T14:28:01,342 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/86e4bbfab84a4b19a68db17807b02b3b is 50, key is test_row_0/C:col10/1733840880073/Put/seqid=0 2024-12-10T14:28:01,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742354_1530 (size=12001) 2024-12-10T14:28:01,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T14:28:01,635 INFO [Thread-2254 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-12-10T14:28:01,637 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:28:01,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-12-10T14:28:01,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 
2024-12-10T14:28:01,638 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:28:01,639 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:28:01,639 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:28:01,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T14:28:01,746 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/86e4bbfab84a4b19a68db17807b02b3b 2024-12-10T14:28:01,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b9f049599d72489fa35576752b0d715c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b9f049599d72489fa35576752b0d715c 2024-12-10T14:28:01,753 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b9f049599d72489fa35576752b0d715c, entries=250, sequenceid=124, filesize=47.0 K 2024-12-10T14:28:01,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/a7ff227d438b4d898e4f49ea6d1409c2 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/a7ff227d438b4d898e4f49ea6d1409c2 2024-12-10T14:28:01,756 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/a7ff227d438b4d898e4f49ea6d1409c2, entries=150, sequenceid=124, filesize=11.7 K 2024-12-10T14:28:01,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/86e4bbfab84a4b19a68db17807b02b3b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/86e4bbfab84a4b19a68db17807b02b3b 2024-12-10T14:28:01,760 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/86e4bbfab84a4b19a68db17807b02b3b, entries=150, sequenceid=124, filesize=11.7 K 2024-12-10T14:28:01,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for a09038ab689acaef0b961036bc4b4bd2 in 1687ms, sequenceid=124, compaction requested=true 2024-12-10T14:28:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:28:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:28:01,761 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:28:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:28:01,761 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:28:01,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:28:01,762 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:28:01,762 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:28:01,762 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/A is initiating minor compaction (all files) 2024-12-10T14:28:01,762 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/B is initiating minor compaction (all files) 2024-12-10T14:28:01,762 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/A in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
2024-12-10T14:28:01,762 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/B in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:01,762 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/71bfceb9127f47ca8e0c4a1047543243, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b82efe5f5cf24d0e8fb8974b244034de, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/536de0d9d2a747a3b1182504021910a1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b9f049599d72489fa35576752b0d715c] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=137.8 K 2024-12-10T14:28:01,762 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/f0d7351584b3468dbae2d039233aea18, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/7f137a1ed808429397bf4c0f5bc576ff, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/8491e473a2ac47c1ab117e444537b332, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/a7ff227d438b4d898e4f49ea6d1409c2] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=47.0 K 2024-12-10T14:28:01,762 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:01,762 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/71bfceb9127f47ca8e0c4a1047543243, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b82efe5f5cf24d0e8fb8974b244034de, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/536de0d9d2a747a3b1182504021910a1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b9f049599d72489fa35576752b0d715c] 2024-12-10T14:28:01,763 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting f0d7351584b3468dbae2d039233aea18, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1733840875571 2024-12-10T14:28:01,763 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71bfceb9127f47ca8e0c4a1047543243, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1733840875571 2024-12-10T14:28:01,763 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f137a1ed808429397bf4c0f5bc576ff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733840876198 2024-12-10T14:28:01,763 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b82efe5f5cf24d0e8fb8974b244034de, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733840876198 2024-12-10T14:28:01,763 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 8491e473a2ac47c1ab117e444537b332, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733840877317 2024-12-10T14:28:01,763 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 536de0d9d2a747a3b1182504021910a1, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733840877317 2024-12-10T14:28:01,763 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9f049599d72489fa35576752b0d715c, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733840879457 2024-12-10T14:28:01,763 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting a7ff227d438b4d898e4f49ea6d1409c2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733840880072 2024-12-10T14:28:01,770 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:01,771 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412107b8b1245dbdb4e1e9df714b02c52adb5_a09038ab689acaef0b961036bc4b4bd2 store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:01,772 INFO 
[RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#B#compaction#444 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:28:01,772 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/811391b31ac749f89d0e7dfcb0f363d7 is 50, key is test_row_0/B:col10/1733840880073/Put/seqid=0 2024-12-10T14:28:01,774 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412107b8b1245dbdb4e1e9df714b02c52adb5_a09038ab689acaef0b961036bc4b4bd2, store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:01,774 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412107b8b1245dbdb4e1e9df714b02c52adb5_a09038ab689acaef0b961036bc4b4bd2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:01,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742355_1531 (size=12241) 2024-12-10T14:28:01,786 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/811391b31ac749f89d0e7dfcb0f363d7 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/811391b31ac749f89d0e7dfcb0f363d7 2024-12-10T14:28:01,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742356_1532 (size=4469) 2024-12-10T14:28:01,787 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#A#compaction#445 average throughput is 1.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:28:01,788 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/755b929230b54dbea4af97a698c3e7bf is 175, key is test_row_0/A:col10/1733840880073/Put/seqid=0 2024-12-10T14:28:01,790 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:01,790 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T14:28:01,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:01,791 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-10T14:28:01,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A 2024-12-10T14:28:01,791 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/B of a09038ab689acaef0b961036bc4b4bd2 into 811391b31ac749f89d0e7dfcb0f363d7(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:28:01,791 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:01,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:01,791 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/B, priority=12, startTime=1733840881761; duration=0sec 2024-12-10T14:28:01,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B 2024-12-10T14:28:01,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:01,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C 2024-12-10T14:28:01,792 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:28:01,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:01,792 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:B 2024-12-10T14:28:01,792 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T14:28:01,793 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T14:28:01,793 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/C is initiating minor compaction (all files) 2024-12-10T14:28:01,793 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/C in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
2024-12-10T14:28:01,793 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/187d8195b32c42bc80d08cc0f19b2e08, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/8a67d5f169e742c086103ee492dd5c66, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/65b1ae12c3a24e14863c0827999d7a25, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/86e4bbfab84a4b19a68db17807b02b3b] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=47.0 K 2024-12-10T14:28:01,795 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 187d8195b32c42bc80d08cc0f19b2e08, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1733840875571 2024-12-10T14:28:01,795 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a67d5f169e742c086103ee492dd5c66, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733840876198 2024-12-10T14:28:01,796 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 65b1ae12c3a24e14863c0827999d7a25, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733840877317 2024-12-10T14:28:01,796 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 86e4bbfab84a4b19a68db17807b02b3b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733840880072 2024-12-10T14:28:01,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742357_1533 (size=31195) 2024-12-10T14:28:01,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210dd4e94f5217f44df9411cf26dc447d8c_a09038ab689acaef0b961036bc4b4bd2 is 50, key is test_row_0/A:col10/1733840880084/Put/seqid=0 2024-12-10T14:28:01,803 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/755b929230b54dbea4af97a698c3e7bf as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/755b929230b54dbea4af97a698c3e7bf 2024-12-10T14:28:01,806 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#C#compaction#447 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:28:01,807 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/ea314c37c89b4503b48d6c19082c25c1 is 50, key is test_row_0/C:col10/1733840880073/Put/seqid=0 2024-12-10T14:28:01,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742358_1534 (size=12304) 2024-12-10T14:28:01,809 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/A of a09038ab689acaef0b961036bc4b4bd2 into 755b929230b54dbea4af97a698c3e7bf(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:28:01,809 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:01,809 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/A, priority=12, startTime=1733840881761; duration=0sec 2024-12-10T14:28:01,809 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:01,809 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:A 2024-12-10T14:28:01,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:01,813 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210dd4e94f5217f44df9411cf26dc447d8c_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210dd4e94f5217f44df9411cf26dc447d8c_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:01,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b18a1135bbb2450983cb866aa6cc4fb2, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:01,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b18a1135bbb2450983cb866aa6cc4fb2 is 175, key is test_row_0/A:col10/1733840880084/Put/seqid=0 2024-12-10T14:28:01,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742359_1535 (size=12241) 2024-12-10T14:28:01,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742360_1536 (size=31105) 2024-12-10T14:28:01,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T14:28:02,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:02,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:02,222 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/ea314c37c89b4503b48d6c19082c25c1 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/ea314c37c89b4503b48d6c19082c25c1 2024-12-10T14:28:02,226 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b18a1135bbb2450983cb866aa6cc4fb2 2024-12-10T14:28:02,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/54fdfcf264da4d44bfb067cefefe1bf4 is 50, key is test_row_0/B:col10/1733840880084/Put/seqid=0 2024-12-10T14:28:02,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742361_1537 (size=12151) 2024-12-10T14:28:02,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T14:28:02,241 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/C of a09038ab689acaef0b961036bc4b4bd2 into ea314c37c89b4503b48d6c19082c25c1(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:28:02,241 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:02,241 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/C, priority=12, startTime=1733840881761; duration=0sec 2024-12-10T14:28:02,241 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:02,241 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:C 2024-12-10T14:28:02,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840942252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840942252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840942253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840942253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840942254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840942356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840942356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840942356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840942356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840942356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840942557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840942558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840942558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840942559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840942559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,634 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/54fdfcf264da4d44bfb067cefefe1bf4 2024-12-10T14:28:02,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/f6b194039010414ba4f7d763ad4fae23 is 50, key is test_row_0/C:col10/1733840880084/Put/seqid=0 2024-12-10T14:28:02,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742362_1538 (size=12151) 2024-12-10T14:28:02,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T14:28:02,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840942860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840942861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840942861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840942863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:02,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:02,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840942864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:03,046 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/f6b194039010414ba4f7d763ad4fae23 2024-12-10T14:28:03,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b18a1135bbb2450983cb866aa6cc4fb2 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b18a1135bbb2450983cb866aa6cc4fb2 2024-12-10T14:28:03,052 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b18a1135bbb2450983cb866aa6cc4fb2, entries=150, sequenceid=135, filesize=30.4 K 2024-12-10T14:28:03,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/54fdfcf264da4d44bfb067cefefe1bf4 as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/54fdfcf264da4d44bfb067cefefe1bf4 2024-12-10T14:28:03,056 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/54fdfcf264da4d44bfb067cefefe1bf4, entries=150, sequenceid=135, filesize=11.9 K 2024-12-10T14:28:03,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/f6b194039010414ba4f7d763ad4fae23 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/f6b194039010414ba4f7d763ad4fae23 2024-12-10T14:28:03,060 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/f6b194039010414ba4f7d763ad4fae23, entries=150, sequenceid=135, filesize=11.9 K 2024-12-10T14:28:03,060 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for a09038ab689acaef0b961036bc4b4bd2 in 1269ms, sequenceid=135, compaction requested=false 2024-12-10T14:28:03,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:03,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
2024-12-10T14:28:03,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-12-10T14:28:03,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-12-10T14:28:03,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-12-10T14:28:03,063 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4220 sec 2024-12-10T14:28:03,064 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 1.4260 sec 2024-12-10T14:28:03,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:03,394 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-10T14:28:03,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A 2024-12-10T14:28:03,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:03,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B 2024-12-10T14:28:03,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:03,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C 2024-12-10T14:28:03,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:03,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:03,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840943397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:03,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:03,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210209eeb1bfa4649b7819be9ed315e7fdc_a09038ab689acaef0b961036bc4b4bd2 is 50, key is test_row_0/A:col10/1733840883393/Put/seqid=0 2024-12-10T14:28:03,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840943398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:03,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:03,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840943398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:03,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:03,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840943398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:03,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:03,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840943399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:03,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742363_1539 (size=12304) 2024-12-10T14:28:03,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:03,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840943502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:03,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:03,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840943502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:03,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:03,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840943502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:03,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:03,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840943503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:03,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:03,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840943502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:03,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:03,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840943705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:03,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:03,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840943705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:03,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:03,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840943705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:03,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:03,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840943705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:03,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:03,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840943706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:03,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T14:28:03,741 INFO [Thread-2254 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-12-10T14:28:03,742 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:28:03,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-12-10T14:28:03,744 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:28:03,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T14:28:03,744 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:28:03,744 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:28:03,809 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:03,813 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210209eeb1bfa4649b7819be9ed315e7fdc_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210209eeb1bfa4649b7819be9ed315e7fdc_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:03,813 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/0f061ca421c04d8ea807310158e02d52, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:03,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/0f061ca421c04d8ea807310158e02d52 is 175, key is test_row_0/A:col10/1733840883393/Put/seqid=0 2024-12-10T14:28:03,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742364_1540 (size=31105) 2024-12-10T14:28:03,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T14:28:03,896 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:03,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-10T14:28:03,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:03,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:03,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:03,897 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:28:03,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:03,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:04,008 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:04,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840944007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:04,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:04,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840944008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:04,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:04,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840944008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:04,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:04,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840944009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:04,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:04,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840944010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:04,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T14:28:04,049 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:04,049 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-10T14:28:04,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:04,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:04,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:04,049 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:28:04,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:04,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:04,201 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:04,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-10T14:28:04,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:04,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:04,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:04,202 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:04,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:04,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:28:04,218 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=167, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/0f061ca421c04d8ea807310158e02d52 2024-12-10T14:28:04,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/e2a0d9b7fdda4235b8c55319cf236007 is 50, key is test_row_0/B:col10/1733840883393/Put/seqid=0 2024-12-10T14:28:04,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742365_1541 (size=12151) 2024-12-10T14:28:04,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/e2a0d9b7fdda4235b8c55319cf236007 2024-12-10T14:28:04,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/e4dea5d27cac4adf8941969f269d0284 is 50, key is test_row_0/C:col10/1733840883393/Put/seqid=0 2024-12-10T14:28:04,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742366_1542 (size=12151) 2024-12-10T14:28:04,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T14:28:04,354 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:04,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-10T14:28:04,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:04,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:04,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
2024-12-10T14:28:04,354 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:04,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:04,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:04,506 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:04,506 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-10T14:28:04,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:04,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:04,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:04,507 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:04,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:04,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:04,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:04,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840944511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:04,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:04,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840944512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:04,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:04,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840944512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:04,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:04,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840944514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:04,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:04,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840944516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:04,639 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/e4dea5d27cac4adf8941969f269d0284 2024-12-10T14:28:04,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/0f061ca421c04d8ea807310158e02d52 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/0f061ca421c04d8ea807310158e02d52 2024-12-10T14:28:04,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/0f061ca421c04d8ea807310158e02d52, entries=150, sequenceid=167, filesize=30.4 K 2024-12-10T14:28:04,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/e2a0d9b7fdda4235b8c55319cf236007 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/e2a0d9b7fdda4235b8c55319cf236007 2024-12-10T14:28:04,650 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/e2a0d9b7fdda4235b8c55319cf236007, entries=150, sequenceid=167, filesize=11.9 K 2024-12-10T14:28:04,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/e4dea5d27cac4adf8941969f269d0284 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/e4dea5d27cac4adf8941969f269d0284 2024-12-10T14:28:04,654 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/e4dea5d27cac4adf8941969f269d0284, entries=150, sequenceid=167, filesize=11.9 K 2024-12-10T14:28:04,655 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=33.54 KB/34350 for a09038ab689acaef0b961036bc4b4bd2 in 1262ms, sequenceid=167, compaction requested=true 2024-12-10T14:28:04,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:04,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:28:04,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:04,655 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:28:04,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:28:04,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:04,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:28:04,655 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:28:04,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:28:04,656 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93405 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:28:04,656 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/A is initiating minor 
compaction (all files) 2024-12-10T14:28:04,656 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/A in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:04,656 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/755b929230b54dbea4af97a698c3e7bf, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b18a1135bbb2450983cb866aa6cc4fb2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/0f061ca421c04d8ea807310158e02d52] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=91.2 K 2024-12-10T14:28:04,656 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:04,656 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/755b929230b54dbea4af97a698c3e7bf, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b18a1135bbb2450983cb866aa6cc4fb2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/0f061ca421c04d8ea807310158e02d52] 2024-12-10T14:28:04,656 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36543 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:28:04,657 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/B is initiating minor compaction (all files) 2024-12-10T14:28:04,657 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/B in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
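The compaction lines above show the flusher-triggered selection at work: ExploringCompactionPolicy picks all three store files of family A (totalSize=91.2 K) for a minor compaction, with a parallel selection starting for B; "16 blocking" refers to the store-file count at which further updates to the region would be blocked (hbase.hstore.blockingStoreFiles, default 16). A comparable compaction can also be requested explicitly through the HBase Admin API. The following is only a minimal sketch: the table name is taken from this log, everything else is standard client API, and the request is still subject to the server's own selection policy.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Ask for a minor compaction of just the 'A' family, like the one the
      // flusher queued above; the call returns once the request is submitted.
      admin.compact(table, Bytes.toBytes("A"));
      // A major compaction would rewrite every store file in the family:
      // admin.majorCompact(table, Bytes.toBytes("A"));
    }
  }
}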
2024-12-10T14:28:04,657 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 755b929230b54dbea4af97a698c3e7bf, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733840880072 2024-12-10T14:28:04,657 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/811391b31ac749f89d0e7dfcb0f363d7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/54fdfcf264da4d44bfb067cefefe1bf4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/e2a0d9b7fdda4235b8c55319cf236007] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=35.7 K 2024-12-10T14:28:04,657 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b18a1135bbb2450983cb866aa6cc4fb2, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733840880078 2024-12-10T14:28:04,657 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 811391b31ac749f89d0e7dfcb0f363d7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733840880072 2024-12-10T14:28:04,657 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 54fdfcf264da4d44bfb067cefefe1bf4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733840880078 2024-12-10T14:28:04,657 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f061ca421c04d8ea807310158e02d52, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733840882252 2024-12-10T14:28:04,658 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting e2a0d9b7fdda4235b8c55319cf236007, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733840882252 2024-12-10T14:28:04,659 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:04,659 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-10T14:28:04,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
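The RSRpcServices(3992) and FlushRegionCallable lines above show the region server executing a flush as a remote procedure (pid=160) dispatched by the master; the parent FlushTableProcedure (pid=159) is reported finished further below. From a client, a flush of the whole table can be requested through the Admin API (whether that is what started pid=159 here is not visible in this excerpt); a minimal sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush every memstore of the table; each region writes its in-memory
      // data out to new store files, as in the flush that starts just below.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}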
2024-12-10T14:28:04,659 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-10T14:28:04,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A 2024-12-10T14:28:04,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:04,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B 2024-12-10T14:28:04,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:04,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C 2024-12-10T14:28:04,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:04,664 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:04,665 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#B#compaction#453 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:28:04,665 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/232a7847382b4f4cb3731947c5699ddb is 50, key is test_row_0/B:col10/1733840883393/Put/seqid=0 2024-12-10T14:28:04,667 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210607e8ac736064fe89d8957d908487f54_a09038ab689acaef0b961036bc4b4bd2 store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:04,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210211bba89a21c416a853632a9009cbf59_a09038ab689acaef0b961036bc4b4bd2 is 50, key is test_row_0/A:col10/1733840883396/Put/seqid=0 2024-12-10T14:28:04,669 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210607e8ac736064fe89d8957d908487f54_a09038ab689acaef0b961036bc4b4bd2, store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:04,669 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210607e8ac736064fe89d8957d908487f54_a09038ab689acaef0b961036bc4b4bd2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:04,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742367_1543 (size=12493) 2024-12-10T14:28:04,681 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/232a7847382b4f4cb3731947c5699ddb as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/232a7847382b4f4cb3731947c5699ddb 2024-12-10T14:28:04,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742368_1544 (size=12304) 2024-12-10T14:28:04,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742369_1545 (size=4469) 2024-12-10T14:28:04,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:04,687 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/B of a09038ab689acaef0b961036bc4b4bd2 into 
232a7847382b4f4cb3731947c5699ddb(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:28:04,687 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:04,687 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/B, priority=13, startTime=1733840884655; duration=0sec 2024-12-10T14:28:04,687 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:28:04,687 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:B 2024-12-10T14:28:04,687 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:28:04,687 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210211bba89a21c416a853632a9009cbf59_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210211bba89a21c416a853632a9009cbf59_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:04,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/1e93262bff8d43a5944fd0f93f3262ab, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:04,688 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36543 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:28:04,688 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/C is initiating minor compaction (all files) 2024-12-10T14:28:04,688 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/C in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
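The DefaultMobStoreFlusher and DefaultMobStoreCompactor messages above indicate that family A is MOB-enabled: the compactor opens a MOB writer up front and then aborts it "because there are no MOB cells", presumably because none of the cells in the selected files exceeded the family's MOB threshold, while the flusher renames its MOB file into the mobdir. MOB is configured per column family when the table schema is defined; a minimal sketch of such a schema follows (the table name and threshold below are illustrative assumptions, not values taken from this test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Cells in family 'A' larger than the threshold are written to separate
      // MOB files under the mobdir instead of the regular store files.
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("MobExample"))      // hypothetical table name
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100 * 1024)                   // 100 KB, an assumed threshold
              .build())
          .build());
    }
  }
}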
2024-12-10T14:28:04,688 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/ea314c37c89b4503b48d6c19082c25c1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/f6b194039010414ba4f7d763ad4fae23, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/e4dea5d27cac4adf8941969f269d0284] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=35.7 K 2024-12-10T14:28:04,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/1e93262bff8d43a5944fd0f93f3262ab is 175, key is test_row_0/A:col10/1733840883396/Put/seqid=0 2024-12-10T14:28:04,688 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting ea314c37c89b4503b48d6c19082c25c1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733840880072 2024-12-10T14:28:04,689 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting f6b194039010414ba4f7d763ad4fae23, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733840880078 2024-12-10T14:28:04,689 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting e4dea5d27cac4adf8941969f269d0284, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733840882252 2024-12-10T14:28:04,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742370_1546 (size=31105) 2024-12-10T14:28:04,695 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#C#compaction#456 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:28:04,696 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/a0a959aefdd340d5a9d39d0ba1ce93cf is 50, key is test_row_0/C:col10/1733840883393/Put/seqid=0 2024-12-10T14:28:04,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742371_1547 (size=12493) 2024-12-10T14:28:04,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T14:28:05,085 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#A#compaction#454 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:28:05,086 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b62fd3d8ca2b4976b8aa487e51d8be6c is 175, key is test_row_0/A:col10/1733840883393/Put/seqid=0 2024-12-10T14:28:05,092 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=175, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/1e93262bff8d43a5944fd0f93f3262ab 2024-12-10T14:28:05,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742372_1548 (size=31447) 2024-12-10T14:28:05,109 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/a0a959aefdd340d5a9d39d0ba1ce93cf as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/a0a959aefdd340d5a9d39d0ba1ce93cf 2024-12-10T14:28:05,110 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/b62fd3d8ca2b4976b8aa487e51d8be6c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b62fd3d8ca2b4976b8aa487e51d8be6c 2024-12-10T14:28:05,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/ffb617eb03454b628f0b8e0797098499 is 50, key is test_row_0/B:col10/1733840883396/Put/seqid=0 2024-12-10T14:28:05,115 INFO 
[RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/A of a09038ab689acaef0b961036bc4b4bd2 into b62fd3d8ca2b4976b8aa487e51d8be6c(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:28:05,115 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:05,115 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/A, priority=13, startTime=1733840884655; duration=0sec 2024-12-10T14:28:05,115 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/C of a09038ab689acaef0b961036bc4b4bd2 into a0a959aefdd340d5a9d39d0ba1ce93cf(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:28:05,115 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:05,115 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/C, priority=13, startTime=1733840884655; duration=0sec 2024-12-10T14:28:05,115 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:05,115 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:A 2024-12-10T14:28:05,115 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:05,115 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:C 2024-12-10T14:28:05,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742373_1549 (size=12151) 2024-12-10T14:28:05,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:05,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
as already flushing 2024-12-10T14:28:05,526 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/ffb617eb03454b628f0b8e0797098499 2024-12-10T14:28:05,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:05,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840945537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:05,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:05,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840945537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:05,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:05,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840945537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:05,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:05,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840945538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:05,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:05,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840945539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:05,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/21bcb865ac1c44aaa9946e89eaa3c79a is 50, key is test_row_0/C:col10/1733840883396/Put/seqid=0 2024-12-10T14:28:05,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742374_1550 (size=12151) 2024-12-10T14:28:05,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:05,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840945640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:05,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:05,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840945641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:05,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:05,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:05,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840945641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:05,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840945641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:05,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:05,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840945642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:05,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:05,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840945843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840945844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:05,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840945844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:05,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840945844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:05,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840945844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:05,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T14:28:05,958 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/21bcb865ac1c44aaa9946e89eaa3c79a 2024-12-10T14:28:05,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/1e93262bff8d43a5944fd0f93f3262ab as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/1e93262bff8d43a5944fd0f93f3262ab 2024-12-10T14:28:05,965 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/1e93262bff8d43a5944fd0f93f3262ab, entries=150, sequenceid=175, filesize=30.4 K 2024-12-10T14:28:05,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/ffb617eb03454b628f0b8e0797098499 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/ffb617eb03454b628f0b8e0797098499 2024-12-10T14:28:05,968 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/ffb617eb03454b628f0b8e0797098499, entries=150, sequenceid=175, filesize=11.9 K 2024-12-10T14:28:05,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/21bcb865ac1c44aaa9946e89eaa3c79a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/21bcb865ac1c44aaa9946e89eaa3c79a 2024-12-10T14:28:05,972 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/21bcb865ac1c44aaa9946e89eaa3c79a, entries=150, sequenceid=175, filesize=11.9 K 2024-12-10T14:28:05,972 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for a09038ab689acaef0b961036bc4b4bd2 in 1313ms, sequenceid=175, compaction requested=false 2024-12-10T14:28:05,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:05,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
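The block of RegionTooBusyException warnings above comes from HRegion.checkResources: once a region's memstore data size exceeds its configured flush size multiplied by hbase.hregion.memstore.block.multiplier, new mutations are rejected with "Over memstore limit=..." until a flush (such as the one that just finished in 1313ms) brings the size back down, and the client retries in the meantime. The 512.0 K limit here is consistent with a small, test-style flush size of 128 KB times the default multiplier of 4, although the exact settings used by this test are not shown in the log. A minimal sketch of how the two settings combine (the specific values are assumptions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical test-style settings: a deliberately tiny flush size so that
    // flushes and write blocking kick in quickly under load.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // 128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // 4 is the default

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;                     // 512 KB here

    // Writes to the region are rejected with RegionTooBusyException once the
    // region's memstore grows past this blocking limit, until a flush completes.
    System.out.println("blocking memstore limit = " + (blockingLimit / 1024) + " K");
  }
}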
2024-12-10T14:28:05,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-12-10T14:28:05,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-12-10T14:28:05,974 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-10T14:28:05,974 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2290 sec 2024-12-10T14:28:05,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 2.2330 sec 2024-12-10T14:28:06,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:06,149 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-10T14:28:06,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A 2024-12-10T14:28:06,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:06,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B 2024-12-10T14:28:06,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:06,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C 2024-12-10T14:28:06,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:06,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840946149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840946150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840946151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,155 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ab881d7ac68a47e6a6768314f2228171_a09038ab689acaef0b961036bc4b4bd2 is 50, key is test_row_0/A:col10/1733840886147/Put/seqid=0 2024-12-10T14:28:06,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840946153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840946153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742375_1551 (size=14794) 2024-12-10T14:28:06,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840946253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840946254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840946254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840946256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840946256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840946455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840946456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840946457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840946458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840946459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,559 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:06,562 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ab881d7ac68a47e6a6768314f2228171_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ab881d7ac68a47e6a6768314f2228171_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:06,563 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/6f574b5d1b4b431694fca765fe5d623d, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:06,563 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/6f574b5d1b4b431694fca765fe5d623d is 175, key is test_row_0/A:col10/1733840886147/Put/seqid=0 2024-12-10T14:28:06,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742376_1552 (size=39749) 2024-12-10T14:28:06,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840946759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840946760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840946760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840946761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:06,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840946763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:06,967 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=208, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/6f574b5d1b4b431694fca765fe5d623d 2024-12-10T14:28:06,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/a1b9288697b44a8daf6d7621623efd5c is 50, key is test_row_0/B:col10/1733840886147/Put/seqid=0 2024-12-10T14:28:06,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742377_1553 (size=12151) 2024-12-10T14:28:07,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:07,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840947262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:07,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:07,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840947264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:07,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:07,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840947265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:07,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:07,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840947266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:07,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:07,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840947267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:07,385 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/a1b9288697b44a8daf6d7621623efd5c 2024-12-10T14:28:07,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/231a5d9747724fb2928ed319ba60056a is 50, key is test_row_0/C:col10/1733840886147/Put/seqid=0 2024-12-10T14:28:07,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742378_1554 (size=12151) 2024-12-10T14:28:07,796 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/231a5d9747724fb2928ed319ba60056a 2024-12-10T14:28:07,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/6f574b5d1b4b431694fca765fe5d623d as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/6f574b5d1b4b431694fca765fe5d623d 2024-12-10T14:28:07,803 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/6f574b5d1b4b431694fca765fe5d623d, entries=200, sequenceid=208, filesize=38.8 K 2024-12-10T14:28:07,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/a1b9288697b44a8daf6d7621623efd5c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/a1b9288697b44a8daf6d7621623efd5c 2024-12-10T14:28:07,806 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/a1b9288697b44a8daf6d7621623efd5c, entries=150, sequenceid=208, filesize=11.9 K 2024-12-10T14:28:07,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/231a5d9747724fb2928ed319ba60056a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/231a5d9747724fb2928ed319ba60056a 2024-12-10T14:28:07,810 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/231a5d9747724fb2928ed319ba60056a, entries=150, sequenceid=208, filesize=11.9 K 2024-12-10T14:28:07,811 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=26.84 KB/27480 for a09038ab689acaef0b961036bc4b4bd2 in 1662ms, sequenceid=208, compaction requested=true 2024-12-10T14:28:07,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:07,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:28:07,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:07,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:28:07,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:28:07,811 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:28:07,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:28:07,811 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:28:07,811 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:28:07,812 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102301 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:28:07,812 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/A is initiating minor compaction (all files) 2024-12-10T14:28:07,812 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/A in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:07,812 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b62fd3d8ca2b4976b8aa487e51d8be6c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/1e93262bff8d43a5944fd0f93f3262ab, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/6f574b5d1b4b431694fca765fe5d623d] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=99.9 K 2024-12-10T14:28:07,812 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:07,812 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b62fd3d8ca2b4976b8aa487e51d8be6c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/1e93262bff8d43a5944fd0f93f3262ab, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/6f574b5d1b4b431694fca765fe5d623d] 2024-12-10T14:28:07,813 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:28:07,813 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/B is initiating minor compaction (all files) 2024-12-10T14:28:07,813 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/B in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:07,813 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/232a7847382b4f4cb3731947c5699ddb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/ffb617eb03454b628f0b8e0797098499, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/a1b9288697b44a8daf6d7621623efd5c] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=35.9 K 2024-12-10T14:28:07,813 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 232a7847382b4f4cb3731947c5699ddb, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733840882252 2024-12-10T14:28:07,813 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting b62fd3d8ca2b4976b8aa487e51d8be6c, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733840882252 2024-12-10T14:28:07,814 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e93262bff8d43a5944fd0f93f3262ab, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733840883396 2024-12-10T14:28:07,814 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting ffb617eb03454b628f0b8e0797098499, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733840883396 2024-12-10T14:28:07,814 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting a1b9288697b44a8daf6d7621623efd5c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733840885538 2024-12-10T14:28:07,814 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
6f574b5d1b4b431694fca765fe5d623d, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733840885535 2024-12-10T14:28:07,819 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:07,820 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#B#compaction#463 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:28:07,821 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/c634d28ae1444e1cae09666fc8db30cb is 50, key is test_row_0/B:col10/1733840886147/Put/seqid=0 2024-12-10T14:28:07,822 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210be89c486390a4371aae457acc0da0962_a09038ab689acaef0b961036bc4b4bd2 store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:07,824 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210be89c486390a4371aae457acc0da0962_a09038ab689acaef0b961036bc4b4bd2, store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:07,824 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210be89c486390a4371aae457acc0da0962_a09038ab689acaef0b961036bc4b4bd2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:07,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742379_1555 (size=12595) 2024-12-10T14:28:07,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742380_1556 (size=4469) 2024-12-10T14:28:07,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T14:28:07,848 INFO [Thread-2254 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-12-10T14:28:07,849 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:28:07,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-12-10T14:28:07,850 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:28:07,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T14:28:07,851 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:28:07,851 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:28:07,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T14:28:08,002 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:08,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-10T14:28:08,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:08,003 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-10T14:28:08,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A 2024-12-10T14:28:08,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:08,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B 2024-12-10T14:28:08,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:08,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C 2024-12-10T14:28:08,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:08,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412102311bcb8485a4a25a237b2f4734b1011_a09038ab689acaef0b961036bc4b4bd2 is 50, key is 
test_row_0/A:col10/1733840886151/Put/seqid=0 2024-12-10T14:28:08,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742381_1557 (size=12304) 2024-12-10T14:28:08,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T14:28:08,230 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#A#compaction#462 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:28:08,230 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/6b57bda8ca9f492fa019a19288470c0a is 175, key is test_row_0/A:col10/1733840886147/Put/seqid=0 2024-12-10T14:28:08,232 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/c634d28ae1444e1cae09666fc8db30cb as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/c634d28ae1444e1cae09666fc8db30cb 2024-12-10T14:28:08,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742382_1558 (size=31549) 2024-12-10T14:28:08,238 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/B of a09038ab689acaef0b961036bc4b4bd2 into c634d28ae1444e1cae09666fc8db30cb(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T14:28:08,238 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:08,238 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/B, priority=13, startTime=1733840887811; duration=0sec 2024-12-10T14:28:08,238 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:28:08,238 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:B 2024-12-10T14:28:08,238 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:28:08,239 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:28:08,239 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/C is initiating minor compaction (all files) 2024-12-10T14:28:08,239 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/C in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:08,239 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/a0a959aefdd340d5a9d39d0ba1ce93cf, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/21bcb865ac1c44aaa9946e89eaa3c79a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/231a5d9747724fb2928ed319ba60056a] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=35.9 K 2024-12-10T14:28:08,240 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting a0a959aefdd340d5a9d39d0ba1ce93cf, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733840882252 2024-12-10T14:28:08,240 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 21bcb865ac1c44aaa9946e89eaa3c79a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733840883396 2024-12-10T14:28:08,240 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 231a5d9747724fb2928ed319ba60056a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733840885538 2024-12-10T14:28:08,246 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
a09038ab689acaef0b961036bc4b4bd2#C#compaction#465 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:28:08,247 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/3cc8e1e7e51d43918e59e16f5e6ec020 is 50, key is test_row_0/C:col10/1733840886147/Put/seqid=0 2024-12-10T14:28:08,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742383_1559 (size=12595) 2024-12-10T14:28:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:08,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:08,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840948287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840948288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840948288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840948289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840948290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840948391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840948391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840948391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840948392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840948392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:08,416 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412102311bcb8485a4a25a237b2f4734b1011_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102311bcb8485a4a25a237b2f4734b1011_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:08,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/712402941a5f452380b554d7950cdab1, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:08,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/712402941a5f452380b554d7950cdab1 is 175, key is test_row_0/A:col10/1733840886151/Put/seqid=0 2024-12-10T14:28:08,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742384_1560 (size=31105) 2024-12-10T14:28:08,421 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=215, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/712402941a5f452380b554d7950cdab1 2024-12-10T14:28:08,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/e998880baaea4d0db948c43b840bca3a is 50, key is test_row_0/B:col10/1733840886151/Put/seqid=0 2024-12-10T14:28:08,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742385_1561 (size=12151) 2024-12-10T14:28:08,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T14:28:08,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840948593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840948594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840948594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840948595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840948595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,640 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/6b57bda8ca9f492fa019a19288470c0a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/6b57bda8ca9f492fa019a19288470c0a 2024-12-10T14:28:08,643 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/A of a09038ab689acaef0b961036bc4b4bd2 into 6b57bda8ca9f492fa019a19288470c0a(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:28:08,643 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:08,643 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/A, priority=13, startTime=1733840887811; duration=0sec 2024-12-10T14:28:08,644 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:08,644 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:A 2024-12-10T14:28:08,654 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/3cc8e1e7e51d43918e59e16f5e6ec020 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/3cc8e1e7e51d43918e59e16f5e6ec020 2024-12-10T14:28:08,658 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/C of a09038ab689acaef0b961036bc4b4bd2 into 3cc8e1e7e51d43918e59e16f5e6ec020(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:28:08,658 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:08,658 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/C, priority=13, startTime=1733840887811; duration=0sec 2024-12-10T14:28:08,658 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:08,658 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:C 2024-12-10T14:28:08,829 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/e998880baaea4d0db948c43b840bca3a 2024-12-10T14:28:08,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/c46aa2f469c149f5bda4426eef3770f6 is 50, key is test_row_0/C:col10/1733840886151/Put/seqid=0 2024-12-10T14:28:08,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742386_1562 (size=12151) 2024-12-10T14:28:08,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840948895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840948898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840948898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840948898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:08,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840948899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:08,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T14:28:09,239 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/c46aa2f469c149f5bda4426eef3770f6 2024-12-10T14:28:09,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/712402941a5f452380b554d7950cdab1 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/712402941a5f452380b554d7950cdab1 2024-12-10T14:28:09,246 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/712402941a5f452380b554d7950cdab1, entries=150, sequenceid=215, filesize=30.4 K 2024-12-10T14:28:09,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/e998880baaea4d0db948c43b840bca3a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/e998880baaea4d0db948c43b840bca3a 2024-12-10T14:28:09,250 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/e998880baaea4d0db948c43b840bca3a, entries=150, sequenceid=215, filesize=11.9 K 2024-12-10T14:28:09,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/c46aa2f469c149f5bda4426eef3770f6 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/c46aa2f469c149f5bda4426eef3770f6 2024-12-10T14:28:09,253 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/c46aa2f469c149f5bda4426eef3770f6, entries=150, sequenceid=215, filesize=11.9 K 2024-12-10T14:28:09,254 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for a09038ab689acaef0b961036bc4b4bd2 in 1251ms, sequenceid=215, compaction requested=false 2024-12-10T14:28:09,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:09,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
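Note on the repeated RegionTooBusyException entries above and below: the stack traces show HRegion.checkResources rejecting Mutate RPCs while the memstore of region a09038ab689acaef0b961036bc4b4bd2 sits above its 512.0 K blocking limit, and the flush/compaction records show the region working that backlog down. The blocking limit is a region-server-side product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the unusually small 512 K value suggests the test shrinks the flush size, though the exact settings are not visible in this log. The following is a minimal, hypothetical client-side sketch (not part of the test code) of backing off on this exception, assuming the standard HBase 2.x client API; every name in it other than the table, row, and column family taken from the log is an illustration only.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstoreBackoffSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    // The 512 KB blocking limit seen in the log is a region-server setting:
    // hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier
    // (for example 128 KB * 4 = 512 KB -- assumed values; the defaults are 128 MB * 4).
    Configuration conf = HBaseConfiguration.create();

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));

      // Retry with exponential backoff while the region rejects writes.
      // The stock HBase client already retries RegionTooBusyException
      // internally; an explicit loop like this only matters when client
      // retries are disabled or exhausted.
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(100L << attempt); // 100 ms, 200 ms, 400 ms, ...
        }
      }
    }
  }
}

The repeated callIds arriving on the same client connections (for example 172.17.0.2:41774 and 172.17.0.2:41784) with later deadlines are consistent with the built-in client retries absorbing these rejections during the test; the sketch above only makes that backoff behaviour explicit.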
2024-12-10T14:28:09,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-12-10T14:28:09,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-12-10T14:28:09,256 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-12-10T14:28:09,256 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4040 sec 2024-12-10T14:28:09,257 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 1.4070 sec 2024-12-10T14:28:09,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:09,402 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-10T14:28:09,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A 2024-12-10T14:28:09,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:09,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B 2024-12-10T14:28:09,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:09,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C 2024-12-10T14:28:09,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:09,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:09,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840949404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:09,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:09,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840949405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:09,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:09,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840949405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:09,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:09,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840949406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:09,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:09,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840949406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:09,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121095d5b01728174e8eb014aaad16ef6c62_a09038ab689acaef0b961036bc4b4bd2 is 50, key is test_row_0/A:col10/1733840888289/Put/seqid=0 2024-12-10T14:28:09,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742387_1563 (size=14794) 2024-12-10T14:28:09,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:09,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840949508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:09,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:09,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840949508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:09,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:09,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840949509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:09,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:09,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840949510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:09,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:09,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840949710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:09,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:09,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840949711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:09,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:09,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840949712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:09,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:09,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840949712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:09,813 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:09,817 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121095d5b01728174e8eb014aaad16ef6c62_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121095d5b01728174e8eb014aaad16ef6c62_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:09,817 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/086a8f49d30c407eb6e07e23c3311d7b, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:09,818 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/086a8f49d30c407eb6e07e23c3311d7b is 175, key is test_row_0/A:col10/1733840888289/Put/seqid=0 2024-12-10T14:28:09,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742388_1564 (size=39749) 2024-12-10T14:28:09,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T14:28:09,959 INFO [Thread-2254 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-12-10T14:28:09,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush 
TestAcidGuarantees 2024-12-10T14:28:09,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-12-10T14:28:09,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T14:28:09,962 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T14:28:09,962 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:28:09,962 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:28:10,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:10,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840950012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:10,015 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:10,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840950014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:10,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:10,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840950014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:10,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:10,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840950015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:10,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T14:28:10,114 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:10,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-10T14:28:10,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:10,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:10,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:10,114 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:10,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:10,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:10,224 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=248, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/086a8f49d30c407eb6e07e23c3311d7b 2024-12-10T14:28:10,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/1305fdfc32024c83b8824fc56d03be9a is 50, key is test_row_0/B:col10/1733840888289/Put/seqid=0 2024-12-10T14:28:10,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742389_1565 (size=12151) 2024-12-10T14:28:10,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T14:28:10,266 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:10,266 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-10T14:28:10,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:10,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
as already flushing 2024-12-10T14:28:10,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:10,267 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:10,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:10,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:10,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:10,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840950408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:10,418 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:10,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-10T14:28:10,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:10,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:10,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:10,419 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
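Annotation (not part of the captured log): the records above form a retry loop. Each dispatched FlushRegionCallable is rejected by the region server with "NOT flushing ... as already flushing", the resulting IOException is reported back via RemoteProcedureResultReporter, the master logs "Remote procedure failed, pid=164" and dispatches the procedure again, while writers hitting the region get RegionTooBusyException because the memstore sits above its 512.0 K blocking limit. The sketch below is a minimal, self-contained model of that interaction in plain Java; the class and method names are hypothetical and it is not the HBase implementation.

```java
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical, simplified model of the behaviour visible in the log above:
// a region that rejects remote flush requests while a flush is already running,
// and a dispatcher that keeps re-submitting the procedure until it succeeds.
public class FlushRetrySketch {

    static class Region {
        private final AtomicBoolean flushing = new AtomicBoolean(false);

        // Models the "NOT flushing ... as already flushing" guard:
        // only one flush may run at a time; a second request is rejected.
        void remoteFlush() throws IOException {
            if (!flushing.compareAndSet(false, true)) {
                throw new IOException("Unable to complete flush: already flushing");
            }
            try {
                TimeUnit.MILLISECONDS.sleep(50); // pretend to write store files
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } finally {
                flushing.set(false);
            }
        }

        void startBackgroundFlush() {
            flushing.set(true); // a memstore flush is already in progress
            new Thread(() -> {
                try {
                    TimeUnit.MILLISECONDS.sleep(300);
                } catch (InterruptedException ignored) {
                } finally {
                    flushing.set(false);
                }
            }).start();
        }
    }

    public static void main(String[] args) throws Exception {
        Region region = new Region();
        region.startBackgroundFlush();

        // Models the master re-dispatching the remote procedure after each failure report.
        for (int attempt = 1; ; attempt++) {
            try {
                region.remoteFlush();
                System.out.println("flush procedure succeeded on attempt " + attempt);
                break;
            } catch (IOException e) {
                System.out.println("attempt " + attempt + " failed: " + e.getMessage());
                TimeUnit.MILLISECONDS.sleep(100); // back off before re-dispatch
            }
        }
    }
}
```

The point of the sketch is only the shape of the protocol: the rejection is expected and transient, and the procedure converges as soon as the in-flight flush releases the guard.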
2024-12-10T14:28:10,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:10,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:10,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:10,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840950517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:10,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:10,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840950517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:10,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:10,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840950518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:10,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:10,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840950518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:10,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T14:28:10,570 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:10,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-10T14:28:10,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:10,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:10,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:10,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:10,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:10,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:10,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/1305fdfc32024c83b8824fc56d03be9a 2024-12-10T14:28:10,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/a4cc614f3a0a436fbfc348c52ee646be is 50, key is test_row_0/C:col10/1733840888289/Put/seqid=0 2024-12-10T14:28:10,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742390_1566 (size=12151) 2024-12-10T14:28:10,723 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:10,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-10T14:28:10,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:10,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:10,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:10,724 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:10,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:10,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:10,876 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:10,876 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-10T14:28:10,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:10,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:10,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:10,876 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:10,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:10,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:11,028 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:11,029 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-10T14:28:11,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:11,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:11,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:11,029 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:11,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:11,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:11,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T14:28:11,068 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/a4cc614f3a0a436fbfc348c52ee646be 2024-12-10T14:28:11,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/086a8f49d30c407eb6e07e23c3311d7b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/086a8f49d30c407eb6e07e23c3311d7b 2024-12-10T14:28:11,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/086a8f49d30c407eb6e07e23c3311d7b, entries=200, sequenceid=248, filesize=38.8 K 2024-12-10T14:28:11,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/1305fdfc32024c83b8824fc56d03be9a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/1305fdfc32024c83b8824fc56d03be9a 2024-12-10T14:28:11,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,078 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/1305fdfc32024c83b8824fc56d03be9a, entries=150, sequenceid=248, filesize=11.9 K 2024-12-10T14:28:11,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/a4cc614f3a0a436fbfc348c52ee646be as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/a4cc614f3a0a436fbfc348c52ee646be 2024-12-10T14:28:11,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,082 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/a4cc614f3a0a436fbfc348c52ee646be, entries=150, sequenceid=248, filesize=11.9 K 2024-12-10T14:28:11,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=26.84 KB/27480 for a09038ab689acaef0b961036bc4b4bd2 in 1681ms, sequenceid=248, compaction requested=true 2024-12-10T14:28:11,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:11,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:28:11,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:11,083 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:28:11,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:28:11,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:28:11,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:28:11,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; 
Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T14:28:11,084 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:28:11,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,087 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102403 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:28:11,087 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:28:11,087 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/B is initiating minor compaction (all files) 2024-12-10T14:28:11,087 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/A is initiating minor compaction (all files) 2024-12-10T14:28:11,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,087 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/B in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:11,087 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/A in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
2024-12-10T14:28:11,087 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/c634d28ae1444e1cae09666fc8db30cb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/e998880baaea4d0db948c43b840bca3a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/1305fdfc32024c83b8824fc56d03be9a] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=36.0 K 2024-12-10T14:28:11,087 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/6b57bda8ca9f492fa019a19288470c0a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/712402941a5f452380b554d7950cdab1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/086a8f49d30c407eb6e07e23c3311d7b] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=100.0 K 2024-12-10T14:28:11,087 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:11,087 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/6b57bda8ca9f492fa019a19288470c0a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/712402941a5f452380b554d7950cdab1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/086a8f49d30c407eb6e07e23c3311d7b] 2024-12-10T14:28:11,088 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting c634d28ae1444e1cae09666fc8db30cb, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733840885538 2024-12-10T14:28:11,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,088 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b57bda8ca9f492fa019a19288470c0a, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733840885538 2024-12-10T14:28:11,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,088 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting e998880baaea4d0db948c43b840bca3a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733840886150 2024-12-10T14:28:11,088 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 712402941a5f452380b554d7950cdab1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733840886150 2024-12-10T14:28:11,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,089 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 086a8f49d30c407eb6e07e23c3311d7b, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733840888286 2024-12-10T14:28:11,089 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 1305fdfc32024c83b8824fc56d03be9a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733840888288 2024-12-10T14:28:11,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,094 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:11,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,095 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#B#compaction#471 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:28:11,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,096 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/cac20f87e1d743868f678d5f3683f31f is 50, key is test_row_0/B:col10/1733840888289/Put/seqid=0 2024-12-10T14:28:11,096 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210c679993f19fe42e0a0266e7d43e86939_a09038ab689acaef0b961036bc4b4bd2 store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:11,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,097 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,098 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210c679993f19fe42e0a0266e7d43e86939_a09038ab689acaef0b961036bc4b4bd2, store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:11,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,098 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210c679993f19fe42e0a0266e7d43e86939_a09038ab689acaef0b961036bc4b4bd2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:11,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T14:28:11,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T14:28:11,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T14:28:11,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742392_1568 (size=4469) 2024-12-10T14:28:11,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742391_1567 (size=12697) 2024-12-10T14:28:11,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,181 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:11,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,181 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-10T14:28:11,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
2024-12-10T14:28:11,181 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-10T14:28:11,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A 2024-12-10T14:28:11,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:11,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B 2024-12-10T14:28:11,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:11,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C 2024-12-10T14:28:11,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:11,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121016a4e9e7553a4e2e8415740ad06289b1_a09038ab689acaef0b961036bc4b4bd2 is 50, key is test_row_1/A:col10/1733840889405/Put/seqid=0 
2024-12-10T14:28:11,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T14:28:11,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742393_1569 (size=9814) 2024-12-10T14:28:11,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,198 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,199 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121016a4e9e7553a4e2e8415740ad06289b1_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121016a4e9e7553a4e2e8415740ad06289b1_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:11,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/e067011807ca49cd99d7c8bc3cb72e8a, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:11,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/e067011807ca49cd99d7c8bc3cb72e8a is 175, key is test_row_1/A:col10/1733840889405/Put/seqid=0 2024-12-10T14:28:11,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,200 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742394_1570 (size=22461) 2024-12-10T14:28:11,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
2024-12-10T14:28:11,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:11,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[identical "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" DEBUG entries from RpcServer.default.FPBQ.Fifo handlers 0 and 2, logged between 2024-12-10T14:28:11,490 and 2024-12-10T14:28:11,552 and interleaved with the entries below, are elided]
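For context on the elided entries: StoreFileTrackerFactory(122) is resolving the store file tracker implementation (here DefaultStoreFileTracker) for each store touched by these RPCs. A minimal Java sketch of how that choice is typically driven by configuration follows; the property name "hbase.store.file-tracker.impl" and the value "DEFAULT" are assumptions about the key the factory reads, not something shown in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileTrackerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property name; "DEFAULT" is expected to resolve to
        // org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker,
        // the implementation named in the elided DEBUG entries.
        conf.set("hbase.store.file-tracker.impl", "DEFAULT");
        System.out.println("store file tracker impl: " + conf.get("hbase.store.file-tracker.impl"));
      }
    }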
2024-12-10T14:28:11,511 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#A#compaction#472 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-10T14:28:11,512 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/cb4b1e54de63438c9136d14608257edc is 175, key is test_row_0/A:col10/1733840888289/Put/seqid=0
2024-12-10T14:28:11,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742395_1571 (size=31651)
2024-12-10T14:28:11,520 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/cac20f87e1d743868f678d5f3683f31f as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/cac20f87e1d743868f678d5f3683f31f
2024-12-10T14:28:11,523 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/cb4b1e54de63438c9136d14608257edc as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/cb4b1e54de63438c9136d14608257edc
2024-12-10T14:28:11,525 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/B of a09038ab689acaef0b961036bc4b4bd2 into cac20f87e1d743868f678d5f3683f31f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-10T14:28:11,525 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2:
2024-12-10T14:28:11,525 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/B, priority=13, startTime=1733840891083; duration=0sec
2024-12-10T14:28:11,525 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-10T14:28:11,525 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:B
2024-12-10T14:28:11,525 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-10T14:28:11,531 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-10T14:28:11,531 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/C is initiating minor compaction (all files)
2024-12-10T14:28:11,531 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/C in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.
2024-12-10T14:28:11,531 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/3cc8e1e7e51d43918e59e16f5e6ec020, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/c46aa2f469c149f5bda4426eef3770f6, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/a4cc614f3a0a436fbfc348c52ee646be] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=36.0 K
2024-12-10T14:28:11,532 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cc8e1e7e51d43918e59e16f5e6ec020, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733840885538
2024-12-10T14:28:11,532 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting c46aa2f469c149f5bda4426eef3770f6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733840886150
2024-12-10T14:28:11,533 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting a4cc614f3a0a436fbfc348c52ee646be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733840888288
2024-12-10T14:28:11,534 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/A of a09038ab689acaef0b961036bc4b4bd2 into cb4b1e54de63438c9136d14608257edc(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-10T14:28:11,534 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2:
2024-12-10T14:28:11,534 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/A, priority=13, startTime=1733840891083; duration=0sec
2024-12-10T14:28:11,534 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T14:28:11,534 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:A
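The entries above show the A and B stores of region a09038ab689acaef0b961036bc4b4bd2 finishing their minor compactions while the C store is selected next. For reference, compactions like these can also be requested explicitly through the standard HBase Admin API; a minimal Java sketch follows (the table name comes from this log, the rest is generic client boilerplate, and the request remains subject to the server-side selection policy and throughput controller seen above).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Queue a major compaction for every store of the table; the region server
          // still applies its own compaction policy and throughput limits.
          admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }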
2024-12-10T14:28:11,541 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#C#compaction#474 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-10T14:28:11,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing
2024-12-10T14:28:11,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on a09038ab689acaef0b961036bc4b4bd2
2024-12-10T14:28:11,542 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/6d1ca66ecd4c4c1e8ad8f5a55bf2e1fe is 50, key is test_row_0/C:col10/1733840888289/Put/seqid=0
2024-12-10T14:28:11,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742396_1572 (size=12697)
2024-12-10T14:28:11,562 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/6d1ca66ecd4c4c1e8ad8f5a55bf2e1fe as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/6d1ca66ecd4c4c1e8ad8f5a55bf2e1fe
2024-12-10T14:28:11,567 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/C of a09038ab689acaef0b961036bc4b4bd2 into 6d1ca66ecd4c4c1e8ad8f5a55bf2e1fe(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-10T14:28:11,567 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2:
2024-12-10T14:28:11,567 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/C, priority=13, startTime=1733840891083; duration=0sec
2024-12-10T14:28:11,567 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T14:28:11,567 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:C
2024-12-10T14:28:11,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T14:28:11,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757
    (stack trace identical to the RegionTooBusyException above)
2024-12-10T14:28:11,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840951573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757
2024-12-10T14:28:11,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840951573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757
2024-12-10T14:28:11,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757
    (stack trace identical to the RegionTooBusyException above)
2024-12-10T14:28:11,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840951574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757
2024-12-10T14:28:11,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757
    (stack trace identical to the RegionTooBusyException above)
2024-12-10T14:28:11,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840951573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757
2024-12-10T14:28:11,605 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=255, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/e067011807ca49cd99d7c8bc3cb72e8a
2024-12-10T14:28:11,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/83f9a6e217ca4f1aa55a4b2f987781c0 is 50, key is test_row_1/B:col10/1733840889405/Put/seqid=0
2024-12-10T14:28:11,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742397_1573 (size=9757)
2024-12-10T14:28:11,617 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/83f9a6e217ca4f1aa55a4b2f987781c0
2024-12-10T14:28:11,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/b04f710231dc456c8f3b663524d8f464 is 50, key is test_row_1/C:col10/1733840889405/Put/seqid=0
2024-12-10T14:28:11,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742398_1574 (size=9757)
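The WARN and DEBUG entries around this point show Mutate calls being rejected with RegionTooBusyException because the region's memstore is above its 512.0 K blocking limit while the flush at sequenceid=255 catches up. The HBase client normally absorbs these by retrying internally; purely as an illustration of the back-off idea, here is a minimal Java sketch for a caller that sees the exception directly. The table, family, and qualifier names are taken from this log; the row value, retry count, and sleep times are made up.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionTooBusyRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);
              break; // write accepted
            } catch (RegionTooBusyException e) {
              // Memstore above the blocking limit; back off and let the flush catch up.
              Thread.sleep(100L * attempt);
            }
          }
        }
      }
    }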
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:11,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:11,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840951678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:11,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840951678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:11,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:11,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:11,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840951678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:11,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840951679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:11,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:11,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840951881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:11,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:11,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840951881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:11,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:11,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840951881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:11,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:11,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840951882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:12,044 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/b04f710231dc456c8f3b663524d8f464 2024-12-10T14:28:12,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/e067011807ca49cd99d7c8bc3cb72e8a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/e067011807ca49cd99d7c8bc3cb72e8a 2024-12-10T14:28:12,051 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/e067011807ca49cd99d7c8bc3cb72e8a, entries=100, sequenceid=255, filesize=21.9 K 2024-12-10T14:28:12,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/83f9a6e217ca4f1aa55a4b2f987781c0 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/83f9a6e217ca4f1aa55a4b2f987781c0 2024-12-10T14:28:12,055 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/83f9a6e217ca4f1aa55a4b2f987781c0, entries=100, sequenceid=255, filesize=9.5 K 2024-12-10T14:28:12,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/b04f710231dc456c8f3b663524d8f464 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/b04f710231dc456c8f3b663524d8f464 2024-12-10T14:28:12,058 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/b04f710231dc456c8f3b663524d8f464, entries=100, sequenceid=255, filesize=9.5 K 2024-12-10T14:28:12,059 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=174.43 KB/178620 for a09038ab689acaef0b961036bc4b4bd2 in 878ms, sequenceid=255, compaction requested=false 2024-12-10T14:28:12,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:12,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:12,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-12-10T14:28:12,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-12-10T14:28:12,061 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-12-10T14:28:12,061 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0980 sec 2024-12-10T14:28:12,062 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 2.1010 sec 2024-12-10T14:28:12,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T14:28:12,066 INFO [Thread-2254 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-12-10T14:28:12,067 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T14:28:12,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-12-10T14:28:12,069 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
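[Editor's illustrative note] The events above show a client-driven table flush: the jenkins client requests a flush of TestAcidGuarantees, the master stores a FlushTableProcedure (pid=163, then pid=165) and fans out a FlushRegionProcedure child to the region server, and the client-side future reports "procId: 163 completed". For illustration only, a minimal client-side sketch of requesting such a flush through the public Admin API (assuming a reachable cluster and the same table name; the class name FlushTableExample is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master schedules a
      // FlushTableProcedure with FlushRegionProcedure children, as seen in the log
      // above (parent pid=165, child pid=166).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}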
2024-12-10T14:28:12,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-10T14:28:12,069 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T14:28:12,069 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T14:28:12,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-10T14:28:12,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:12,184 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-10T14:28:12,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A 2024-12-10T14:28:12,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:12,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B 2024-12-10T14:28:12,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:12,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C 2024-12-10T14:28:12,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:12,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:12,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840952185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:12,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:12,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840952185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:12,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:12,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840952187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:12,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:12,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840952187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:12,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121006e65019e55d433cbc0b895c4bbdf5c7_a09038ab689acaef0b961036bc4b4bd2 is 50, key is test_row_0/A:col10/1733840892184/Put/seqid=0 2024-12-10T14:28:12,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742399_1575 (size=14994) 2024-12-10T14:28:12,196 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:12,199 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121006e65019e55d433cbc0b895c4bbdf5c7_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121006e65019e55d433cbc0b895c4bbdf5c7_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:12,200 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/25538ca0d0ba481897e9f563a01e72d7, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:12,200 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/25538ca0d0ba481897e9f563a01e72d7 is 175, key is test_row_0/A:col10/1733840892184/Put/seqid=0 2024-12-10T14:28:12,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742400_1576 (size=39949) 2024-12-10T14:28:12,221 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:12,221 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-10T14:28:12,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:12,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:12,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:12,222 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:12,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
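[Editor's illustrative note] The recurring RegionTooBusyException warnings in this stretch come from HRegion.checkResources(): writes are rejected while the region's memstore is over its blocking limit (512.0 K in this test setup), and the FlushRegionProcedure attempt above fails with "already flushing" until the in-flight flush completes. On the client side these rejections are retried with backoff by RpcRetryingCallerImpl (visible further down as "tries=6, retries=16"). A minimal, hypothetical writer sketch showing the client retry knobs involved, assuming the same table and column family A (row and value literals are illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client retry settings: RegionTooBusyException is a retryable server response,
    // so the caller keeps retrying with pauses until these limits are exhausted.
    conf.setInt("hbase.client.retries.number", 16);  // matches retries=16 seen in the log
    conf.setLong("hbase.client.pause", 100);         // base pause between retries, in ms
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));                                  // row literal for illustration
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));   // family A, qualifier col10 as in the log
      table.put(put);  // blocks through retries while the region's memstore is over its blocking limit
    }
  }
}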
2024-12-10T14:28:12,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:12,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:12,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840952288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:12,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:12,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840952290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:12,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:12,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840952290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:12,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-10T14:28:12,374 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:12,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-10T14:28:12,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:12,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:12,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:12,374 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:12,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:12,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:12,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:12,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840952423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:12,424 DEBUG [Thread-2248 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4136 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., hostname=db1d50717577,46699,1733840717757, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T14:28:12,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:12,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840952490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:12,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:12,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840952491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:12,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:12,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840952492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:12,526 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:12,526 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-10T14:28:12,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:12,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:12,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:12,527 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:28:12,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:12,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:12,604 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=290, memsize=64.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/25538ca0d0ba481897e9f563a01e72d7 2024-12-10T14:28:12,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/fb40a6464356464abb31d9fc8443031b is 50, key is test_row_0/B:col10/1733840892184/Put/seqid=0 2024-12-10T14:28:12,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742401_1577 (size=12301) 2024-12-10T14:28:12,615 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/fb40a6464356464abb31d9fc8443031b 2024-12-10T14:28:12,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/653845ab976446a3926d5ffe4ba24f90 is 50, key is test_row_0/C:col10/1733840892184/Put/seqid=0 2024-12-10T14:28:12,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742402_1578 (size=12301) 2024-12-10T14:28:12,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-10T14:28:12,678 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:12,679 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-10T14:28:12,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:12,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:12,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
2024-12-10T14:28:12,679 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:12,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:12,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:12,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:12,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840952691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:12,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:12,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840952793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:12,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:12,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840952795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:12,796 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:12,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840952795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:12,831 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:12,831 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-10T14:28:12,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:12,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:12,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:12,832 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:28:12,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:12,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:12,983 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:12,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-10T14:28:12,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:12,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:12,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:12,984 ERROR [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:12,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T14:28:12,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T14:28:13,025 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/653845ab976446a3926d5ffe4ba24f90 2024-12-10T14:28:13,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/25538ca0d0ba481897e9f563a01e72d7 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/25538ca0d0ba481897e9f563a01e72d7 2024-12-10T14:28:13,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/25538ca0d0ba481897e9f563a01e72d7, entries=200, sequenceid=290, filesize=39.0 K 2024-12-10T14:28:13,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/fb40a6464356464abb31d9fc8443031b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/fb40a6464356464abb31d9fc8443031b 2024-12-10T14:28:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/fb40a6464356464abb31d9fc8443031b, entries=150, sequenceid=290, filesize=12.0 K 2024-12-10T14:28:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/653845ab976446a3926d5ffe4ba24f90 as 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/653845ab976446a3926d5ffe4ba24f90 2024-12-10T14:28:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,038 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/653845ab976446a3926d5ffe4ba24f90, entries=150, sequenceid=290, filesize=12.0 K 2024-12-10T14:28:13,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,040 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~194.56 KB/199230, heapSize ~510.47 KB/522720, currentSize=13.42 KB/13740 for a09038ab689acaef0b961036bc4b4bd2 in 856ms, sequenceid=290, compaction requested=true 2024-12-10T14:28:13,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:13,040 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:28:13,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:13,040 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:28:13,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,040 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:28:13,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:28:13,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:13,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:28:13,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:28:13,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,041 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:28:13,041 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 94061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:28:13,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,041 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/B is initiating minor compaction (all files) 2024-12-10T14:28:13,041 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/A is initiating minor compaction (all files) 2024-12-10T14:28:13,041 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/A in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:13,041 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/B in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:13,041 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/cb4b1e54de63438c9136d14608257edc, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/e067011807ca49cd99d7c8bc3cb72e8a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/25538ca0d0ba481897e9f563a01e72d7] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=91.9 K 2024-12-10T14:28:13,041 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
2024-12-10T14:28:13,041 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/cac20f87e1d743868f678d5f3683f31f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/83f9a6e217ca4f1aa55a4b2f987781c0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/fb40a6464356464abb31d9fc8443031b] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=33.9 K 2024-12-10T14:28:13,041 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/cb4b1e54de63438c9136d14608257edc, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/e067011807ca49cd99d7c8bc3cb72e8a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/25538ca0d0ba481897e9f563a01e72d7] 2024-12-10T14:28:13,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,042 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb4b1e54de63438c9136d14608257edc, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733840888288 2024-12-10T14:28:13,042 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting cac20f87e1d743868f678d5f3683f31f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733840888288 2024-12-10T14:28:13,042 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 83f9a6e217ca4f1aa55a4b2f987781c0, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733840889405 2024-12-10T14:28:13,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,042 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting e067011807ca49cd99d7c8bc3cb72e8a, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733840889405 2024-12-10T14:28:13,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,042 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting fb40a6464356464abb31d9fc8443031b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733840891573 2024-12-10T14:28:13,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,042 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25538ca0d0ba481897e9f563a01e72d7, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733840891570 2024-12-10T14:28:13,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,044 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,047 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,048 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:13,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,050 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210cfd442a81d2a476299d844d47bec14ef_a09038ab689acaef0b961036bc4b4bd2 store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:13,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,050 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#B#compaction#481 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:28:13,051 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/cd8fafe37dc443cab5e3eff2d12f7af4 is 50, key is test_row_0/B:col10/1733840892184/Put/seqid=0 2024-12-10T14:28:13,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,052 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210cfd442a81d2a476299d844d47bec14ef_a09038ab689acaef0b961036bc4b4bd2, store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:13,052 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210cfd442a81d2a476299d844d47bec14ef_a09038ab689acaef0b961036bc4b4bd2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:13,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742404_1580 (size=4469) 2024-12-10T14:28:13,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742403_1579 (size=12949) 2024-12-10T14:28:13,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,068 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/cd8fafe37dc443cab5e3eff2d12f7af4 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/cd8fafe37dc443cab5e3eff2d12f7af4 2024-12-10T14:28:13,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,073 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/B of a09038ab689acaef0b961036bc4b4bd2 into cd8fafe37dc443cab5e3eff2d12f7af4(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:28:13,073 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:13,073 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/B, priority=13, startTime=1733840893040; duration=0sec 2024-12-10T14:28:13,073 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:28:13,073 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:B 2024-12-10T14:28:13,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,073 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:28:13,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,074 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:28:13,074 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/C is initiating minor 
compaction (all files) 2024-12-10T14:28:13,074 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/C in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:13,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,074 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/6d1ca66ecd4c4c1e8ad8f5a55bf2e1fe, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/b04f710231dc456c8f3b663524d8f464, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/653845ab976446a3926d5ffe4ba24f90] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=33.9 K 2024-12-10T14:28:13,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,075 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d1ca66ecd4c4c1e8ad8f5a55bf2e1fe, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733840888288 2024-12-10T14:28:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,075 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting b04f710231dc456c8f3b663524d8f464, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733840889405 2024-12-10T14:28:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,075 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 653845ab976446a3926d5ffe4ba24f90, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733840891573 2024-12-10T14:28:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,081 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#C#compaction#482 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:28:13,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,082 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/a89d5b4dfbfd41a4b8325e1567f6a23b is 50, key is test_row_0/C:col10/1733840892184/Put/seqid=0 2024-12-10T14:28:13,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,084 DEBUG 
2024-12-10T14:28:13,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742405_1581 (size=12949)
2024-12-10T14:28:13,094 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/a89d5b4dfbfd41a4b8325e1567f6a23b as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/a89d5b4dfbfd41a4b8325e1567f6a23b
2024-12-10T14:28:13,099 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/C of a09038ab689acaef0b961036bc4b4bd2 into a89d5b4dfbfd41a4b8325e1567f6a23b(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-10T14:28:13,099 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2:
2024-12-10T14:28:13,099 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/C, priority=13, startTime=1733840893040; duration=0sec
2024-12-10T14:28:13,099 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T14:28:13,099 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:C
2024-12-10T14:28:13,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
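The DEBUG record above, emitted repeatedly by the RPC handlers, shows StoreFileTrackerFactory instantiating DefaultStoreFileTracker for each store it touches. For orientation only, a minimal sketch of selecting a store file tracker implementation through configuration; the property key "hbase.store.file-tracker.impl" is an assumption about the store-file-tracking feature and should be verified against the HBase version under test, while the implementation class name is taken verbatim from the log record above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TrackerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property key (see lead-in); the value is the tracker class
        // named in the log record above.
        conf.set("hbase.store.file-tracker.impl",
            "org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker");
        System.out.println(conf.get("hbase.store.file-tracker.impl"));
      }
    }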
2024-12-10T14:28:13,136 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757
2024-12-10T14:28:13,137 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46699 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166
2024-12-10T14:28:13,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.
2024-12-10T14:28:13,137 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB
2024-12-10T14:28:13,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A
2024-12-10T14:28:13,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T14:28:13,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B
2024-12-10T14:28:13,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T14:28:13,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C
2024-12-10T14:28:13,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
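The records above show an RS_FLUSH_REGIONS procedure (pid=166) flushing all three column families of region a09038ab689acaef0b961036bc4b4bd2 of TestAcidGuarantees. For reference, a minimal client-side sketch of requesting such a flush through the public Admin API; the table name comes from the log, and everything else (including the class name FlushSketch) is illustrative rather than part of the test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the cluster to flush every region of the table; on the region
          // server this surfaces as a FlushRegionCallable like pid=166 above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }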
2024-12-10T14:28:13,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121038f08b53078a4e789a1fb3e8d1a10e44_a09038ab689acaef0b961036bc4b4bd2 is 50, key is test_row_2/A:col10/1733840892186/Put/seqid=0
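The HFileWriterImpl record above reports the largest cell written during this flush, keyed test_row_2/A:col10. For orientation, a minimal sketch of the kind of client write that produces such a cell; the row, family, and qualifier are taken from the key in the log, while the value and the class name PutSketch are made up for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // One cell shaped like the key in the log: row test_row_2,
          // family A, qualifier col10; the value is illustrative.
          Put put = new Put(Bytes.toBytes("test_row_2"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          table.put(put);
        }
      }
    }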
2024-12-10T14:28:13,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742406_1582 (size=7374)
2024-12-10T14:28:13,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-10T14:28:13,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entry from storefiletracker.StoreFileTrackerFactory(122) — "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" — repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 46699 from 2024-12-10T14:28:13,195 through 2024-12-10T14:28:13,245 ...]
2024-12-10T14:28:13,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T14:28:13,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the identical DEBUG record "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeats back-to-back from RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 46699 between 2024-12-10T14:28:13,283 and 2024-12-10T14:28:13,334; the duplicate records are elided here ...]
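For orientation only: a DEBUG record like the one above is the trace of a configuration-driven factory resolving a tracker class name and instantiating it reflectively, once per call, so three RPC handlers hitting it in a tight loop produce the long run of identical lines. The Java sketch below illustrates that general pattern under stated assumptions; the property key, default class, and type names are hypothetical and are not taken from the HBase source.

import java.util.Properties;

// Minimal sketch (not HBase source): resolve an implementation class name from
// configuration and instantiate it reflectively, emitting one DEBUG-style line
// per instantiation. The property key and default below are illustrative.
public final class TrackerFactorySketch {
    private static final String IMPL_KEY = "store.file.tracker.impl"; // hypothetical key
    private static final String DEFAULT_IMPL = "java.lang.Object";    // stand-in default

    public static Object create(Properties conf) throws ReflectiveOperationException {
        String className = conf.getProperty(IMPL_KEY, DEFAULT_IMPL);
        // One log record per call; handlers calling this repeatedly produce
        // the long run of identical DEBUG lines seen in the log above.
        System.out.println("DEBUG instantiating tracker impl " + className);
        return Class.forName(className).getDeclaredConstructor().newInstance();
    }

    public static void main(String[] args) throws ReflectiveOperationException {
        System.out.println(create(new Properties()).getClass().getName());
    }
}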
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:13,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. as already flushing 2024-12-10T14:28:13,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:13,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840953373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:13,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:13,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840953374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:13,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:13,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840953375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:13,432 DEBUG [Thread-2257 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x50bf224f to 127.0.0.1:58494 2024-12-10T14:28:13,432 DEBUG [Thread-2257 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:28:13,433 DEBUG [Thread-2255 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68c2838a to 127.0.0.1:58494 2024-12-10T14:28:13,433 DEBUG [Thread-2255 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:28:13,434 DEBUG [Thread-2261 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d79f1c0 to 127.0.0.1:58494 2024-12-10T14:28:13,434 DEBUG [Thread-2261 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:28:13,434 DEBUG [Thread-2259 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79be903c to 127.0.0.1:58494 2024-12-10T14:28:13,434 DEBUG [Thread-2259 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:28:13,434 DEBUG [Thread-2263 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x40dfd554 to 127.0.0.1:58494 2024-12-10T14:28:13,434 DEBUG [Thread-2263 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:28:13,464 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#A#compaction#480 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:28:13,464 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/932640c8710e40ae8a60527d3ed31865 is 175, key is test_row_0/A:col10/1733840892184/Put/seqid=0 2024-12-10T14:28:13,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742407_1583 (size=31903) 2024-12-10T14:28:13,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:13,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840953476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:13,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:13,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840953476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:13,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:13,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840953477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:13,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:13,553 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121038f08b53078a4e789a1fb3e8d1a10e44_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121038f08b53078a4e789a1fb3e8d1a10e44_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:13,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/09d56eb1f3b24d59aee000d199c33001, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:13,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/09d56eb1f3b24d59aee000d199c33001 is 175, key is test_row_2/A:col10/1733840892186/Put/seqid=0 2024-12-10T14:28:13,557 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742408_1584 (size=13865) 2024-12-10T14:28:13,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:13,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840953677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:13,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840953678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:13,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:13,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840953679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:13,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:13,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840953694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:13,871 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/932640c8710e40ae8a60527d3ed31865 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/932640c8710e40ae8a60527d3ed31865 2024-12-10T14:28:13,873 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/A of a09038ab689acaef0b961036bc4b4bd2 into 932640c8710e40ae8a60527d3ed31865(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
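Editor's note, a rough illustration only: the repeated RegionTooBusyException entries above come from HRegion.checkResources() rejecting writes while the region's memstore is over its blocking limit (logged here as 512.0 K) and a flush is still in flight. That blocking limit is the memstore flush size multiplied by the block multiplier, both ordinary HBase configuration keys; the concrete values in the sketch below are assumptions chosen to reproduce the 512 K figure seen in this log, not the test's actual settings.

// Sketch: how the memstore blocking limit behind RegionTooBusyException is
// derived from configuration. Values here are assumptions for illustration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical test-sized values: 128 KB flush size x multiplier 4 = 512 KB,
    // matching the "Over memstore limit=512.0 K" messages in this log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;
    // Writes against a region whose memstore exceeds blockingLimit fail fast with
    // RegionTooBusyException until a flush shrinks the memstore again.
    System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
  }
}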
2024-12-10T14:28:13,874 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:13,874 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/A, priority=13, startTime=1733840893040; duration=0sec 2024-12-10T14:28:13,874 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:13,874 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:A 2024-12-10T14:28:13,958 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=297, memsize=4.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/09d56eb1f3b24d59aee000d199c33001 2024-12-10T14:28:13,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/cb7375de3a114c9f8e9625f7c9ffc638 is 50, key is test_row_2/B:col10/1733840892186/Put/seqid=0 2024-12-10T14:28:13,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742409_1585 (size=7415) 2024-12-10T14:28:13,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:13,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840953981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:13,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:13,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840953981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:13,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:13,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840953981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:14,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-10T14:28:14,366 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/cb7375de3a114c9f8e9625f7c9ffc638 2024-12-10T14:28:14,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/7ac6085a8fbe42488fb482e44fff1b6a is 50, key is test_row_2/C:col10/1733840892186/Put/seqid=0 2024-12-10T14:28:14,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742410_1586 (size=7415) 2024-12-10T14:28:14,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:14,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:14,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41776 deadline: 1733840954482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:14,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41774 deadline: 1733840954482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:14,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:14,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840954483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:14,774 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/7ac6085a8fbe42488fb482e44fff1b6a 2024-12-10T14:28:14,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/09d56eb1f3b24d59aee000d199c33001 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/09d56eb1f3b24d59aee000d199c33001 2024-12-10T14:28:14,779 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/09d56eb1f3b24d59aee000d199c33001, entries=50, sequenceid=297, filesize=13.5 K 2024-12-10T14:28:14,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/cb7375de3a114c9f8e9625f7c9ffc638 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/cb7375de3a114c9f8e9625f7c9ffc638 2024-12-10T14:28:14,782 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/cb7375de3a114c9f8e9625f7c9ffc638, entries=50, sequenceid=297, filesize=7.2 K 2024-12-10T14:28:14,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/7ac6085a8fbe42488fb482e44fff1b6a as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/7ac6085a8fbe42488fb482e44fff1b6a 2024-12-10T14:28:14,785 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/7ac6085a8fbe42488fb482e44fff1b6a, entries=50, sequenceid=297, filesize=7.2 K 2024-12-10T14:28:14,785 INFO [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=187.85 KB/192360 for a09038ab689acaef0b961036bc4b4bd2 in 1648ms, sequenceid=297, compaction requested=false 2024-12-10T14:28:14,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:14,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
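Editor's note: the flush that completes here (sequenceid=297) frees memstore space, while the writer threads that received RegionTooBusyException keep retrying; a few lines below, RpcRetryingCallerImpl reports "tries=6, retries=16" for a put on row 'test_row_2' of 'TestAcidGuarantees'. The following is a minimal client-side sketch of that behavior, assuming standard client retry settings rather than the test's own configuration; table, family, column, and row names mirror the log.

// Sketch of a client put that RpcRetryingCallerImpl retries through transient
// RegionTooBusyException responses. The retry settings below are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed client knobs bounding the retry loop seen in the log.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100L); // base pause in ms, backed off between tries

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Table.put() blocks while the retrying caller re-attempts the mutation;
      // it only throws once the retry budget is exhausted.
      table.put(put);
    }
  }
}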
2024-12-10T14:28:14,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db1d50717577:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-12-10T14:28:14,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-12-10T14:28:14,787 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-12-10T14:28:14,787 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7170 sec 2024-12-10T14:28:14,788 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 2.7190 sec 2024-12-10T14:28:15,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(8581): Flush requested on a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:15,485 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=194.56 KB heapSize=510.52 KB 2024-12-10T14:28:15,485 DEBUG [Thread-2246 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e757135 to 127.0.0.1:58494 2024-12-10T14:28:15,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A 2024-12-10T14:28:15,485 DEBUG [Thread-2246 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:28:15,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:15,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B 2024-12-10T14:28:15,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:15,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C 2024-12-10T14:28:15,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:15,487 DEBUG [Thread-2244 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x69abefea to 127.0.0.1:58494 2024-12-10T14:28:15,487 DEBUG [Thread-2244 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:28:15,491 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210c87ea92e75314d3e9c117e0d1e710fc5_a09038ab689acaef0b961036bc4b4bd2 is 50, key is test_row_0/A:col10/1733840893373/Put/seqid=0 2024-12-10T14:28:15,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:15,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41750 deadline: 1733840955491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:15,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742411_1587 (size=12454) 2024-12-10T14:28:15,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:15,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41784 deadline: 1733840955696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:15,697 DEBUG [Thread-2250 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4124 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., hostname=db1d50717577,46699,1733840717757, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T14:28:15,895 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:15,897 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210c87ea92e75314d3e9c117e0d1e710fc5_a09038ab689acaef0b961036bc4b4bd2 to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c87ea92e75314d3e9c117e0d1e710fc5_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:15,898 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/85eb0d93866f4083b8c9b0d7cb5c04ba, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:15,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/85eb0d93866f4083b8c9b0d7cb5c04ba is 175, key is test_row_0/A:col10/1733840893373/Put/seqid=0 2024-12-10T14:28:15,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742412_1588 (size=31255) 2024-12-10T14:28:16,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-10T14:28:16,173 INFO [Thread-2254 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-12-10T14:28:16,178 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T14:28:16,302 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=330, memsize=64.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/85eb0d93866f4083b8c9b0d7cb5c04ba 2024-12-10T14:28:16,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/5e6efa5940014633b8db4c0d4bcb4ef6 is 50, key is test_row_0/B:col10/1733840893373/Put/seqid=0 2024-12-10T14:28:16,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742413_1589 (size=12301) 2024-12-10T14:28:16,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T14:28:16,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46699 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41752 deadline: 1733840956443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 2024-12-10T14:28:16,444 DEBUG [Thread-2248 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8156 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., hostname=db1d50717577,46699,1733840717757, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T14:28:16,712 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/5e6efa5940014633b8db4c0d4bcb4ef6 2024-12-10T14:28:16,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/fe3bdd2e44094855b5f415b4c6ee4bb7 is 50, key is test_row_0/C:col10/1733840893373/Put/seqid=0 2024-12-10T14:28:16,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742414_1590 (size=12301) 2024-12-10T14:28:17,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/fe3bdd2e44094855b5f415b4c6ee4bb7 2024-12-10T14:28:17,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/85eb0d93866f4083b8c9b0d7cb5c04ba as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/85eb0d93866f4083b8c9b0d7cb5c04ba 2024-12-10T14:28:17,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/85eb0d93866f4083b8c9b0d7cb5c04ba, entries=150, sequenceid=330, filesize=30.5 K 2024-12-10T14:28:17,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/5e6efa5940014633b8db4c0d4bcb4ef6 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/5e6efa5940014633b8db4c0d4bcb4ef6 2024-12-10T14:28:17,128 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/5e6efa5940014633b8db4c0d4bcb4ef6, entries=150, sequenceid=330, filesize=12.0 K 2024-12-10T14:28:17,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/fe3bdd2e44094855b5f415b4c6ee4bb7 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/fe3bdd2e44094855b5f415b4c6ee4bb7 2024-12-10T14:28:17,130 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/fe3bdd2e44094855b5f415b4c6ee4bb7, entries=150, sequenceid=330, filesize=12.0 K 2024-12-10T14:28:17,131 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~194.56 KB/199230, heapSize ~510.47 KB/522720, currentSize=6.71 KB/6870 for a09038ab689acaef0b961036bc4b4bd2 in 1646ms, sequenceid=330, compaction requested=true 2024-12-10T14:28:17,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush 
status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:17,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T14:28:17,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:17,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T14:28:17,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:17,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a09038ab689acaef0b961036bc4b4bd2:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T14:28:17,131 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:28:17,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:28:17,131 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:28:17,132 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 32665 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:28:17,132 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 77023 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:28:17,132 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/B is initiating minor compaction (all files) 2024-12-10T14:28:17,132 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/A is initiating minor compaction (all files) 2024-12-10T14:28:17,132 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/B in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:17,132 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/A in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
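
The RegionTooBusyException entries earlier in the log ("Over memstore limit=512.0 K", with RpcRetryingCallerImpl at tries=7 of 16) show the client backing off while MemStoreFlusher.0 drains the region; the 512 K figure is the region's blocking memstore size, evidently configured far below the usual default by this test setup. Purely as an illustrative sketch, and not part of this test run, the standalone snippet below shows the public client-side retry settings that drive that backoff behaviour. The table name, row key, family and column come from the log; the class name, connection details and cell value are assumed.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryTuningSketch {                        // illustrative class name
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
    // The two client-side knobs visible in the retry log above
    // (RpcRetryingCallerImpl reported tries=7, retries=16).
    conf.setInt("hbase.client.retries.number", 16);     // maximum attempts per operation
    conf.setLong("hbase.client.pause", 100);            // base backoff between attempts, in ms

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some value"));
      try {
        table.put(put);
      } catch (IOException e) {
        // While the region is over its blocking memstore size the server answers with
        // RegionTooBusyException (as logged above); once the retries are exhausted the
        // failure surfaces here and the writer has to back off and try again later.
        System.err.println("Put failed after retries: " + e.getMessage());
      }
    }
  }
}

Raising hbase.client.pause spreads the retries over a longer window; the server-side pressure itself is governed by the region memstore settings (hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier).
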
2024-12-10T14:28:17,132 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/cd8fafe37dc443cab5e3eff2d12f7af4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/cb7375de3a114c9f8e9625f7c9ffc638, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/5e6efa5940014633b8db4c0d4bcb4ef6] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=31.9 K 2024-12-10T14:28:17,132 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/932640c8710e40ae8a60527d3ed31865, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/09d56eb1f3b24d59aee000d199c33001, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/85eb0d93866f4083b8c9b0d7cb5c04ba] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=75.2 K 2024-12-10T14:28:17,132 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:17,132 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
files: [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/932640c8710e40ae8a60527d3ed31865, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/09d56eb1f3b24d59aee000d199c33001, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/85eb0d93866f4083b8c9b0d7cb5c04ba] 2024-12-10T14:28:17,132 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting cd8fafe37dc443cab5e3eff2d12f7af4, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733840891573 2024-12-10T14:28:17,132 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 932640c8710e40ae8a60527d3ed31865, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733840891573 2024-12-10T14:28:17,133 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting cb7375de3a114c9f8e9625f7c9ffc638, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1733840892186 2024-12-10T14:28:17,133 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 09d56eb1f3b24d59aee000d199c33001, keycount=50, bloomtype=ROW, size=13.5 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1733840892186 2024-12-10T14:28:17,133 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e6efa5940014633b8db4c0d4bcb4ef6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1733840893371 2024-12-10T14:28:17,133 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85eb0d93866f4083b8c9b0d7cb5c04ba, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1733840893371 2024-12-10T14:28:17,138 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#B#compaction#489 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:28:17,138 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:17,138 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/e41dda5af917497a9c2ee49f472c92c6 is 50, key is test_row_0/B:col10/1733840893373/Put/seqid=0 2024-12-10T14:28:17,139 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121002e54536283f450a9ee30d90645f4499_a09038ab689acaef0b961036bc4b4bd2 store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:17,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742415_1591 (size=13051) 2024-12-10T14:28:17,142 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121002e54536283f450a9ee30d90645f4499_a09038ab689acaef0b961036bc4b4bd2, store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:17,142 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121002e54536283f450a9ee30d90645f4499_a09038ab689acaef0b961036bc4b4bd2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:17,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742416_1592 (size=4469) 2024-12-10T14:28:17,506 DEBUG [Thread-2252 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d9113f3 to 127.0.0.1:58494 2024-12-10T14:28:17,506 DEBUG [Thread-2252 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:28:17,545 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/e41dda5af917497a9c2ee49f472c92c6 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/e41dda5af917497a9c2ee49f472c92c6 2024-12-10T14:28:17,546 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#A#compaction#490 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:28:17,546 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/4471de3bf8f648f6a22419fff22bb3df is 175, key is test_row_0/A:col10/1733840893373/Put/seqid=0 2024-12-10T14:28:17,548 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/B of a09038ab689acaef0b961036bc4b4bd2 into e41dda5af917497a9c2ee49f472c92c6(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:28:17,548 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:17,548 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/B, priority=13, startTime=1733840897131; duration=0sec 2024-12-10T14:28:17,548 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T14:28:17,548 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:B 2024-12-10T14:28:17,548 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T14:28:17,549 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 32665 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T14:28:17,549 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1540): a09038ab689acaef0b961036bc4b4bd2/C is initiating minor compaction (all files) 2024-12-10T14:28:17,549 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a09038ab689acaef0b961036bc4b4bd2/C in TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
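
The compaction-selection entries above (SortedCompactionPolicy choosing 3 eligible files against 16 blocking, ExploringCompactionPolicy taking all 3) are the region server reacting to the flush on its own. For comparison only, the sketch below shows the corresponding client-side Admin calls that request a flush or compaction explicitly, as the admin-driven FLUSH procedure (pid=165) earlier in the log did. The table name and the 'A' family come from the log; the class name and cluster configuration are assumed.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndCompactSketch {                    // illustrative class name
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Force the memstores to disk, like the FLUSH operation (pid=165) above.
      admin.flush(table);

      // Request a minor compaction of the 'A' family; the server-side policy
      // (ExploringCompactionPolicy in the log) still decides which files to merge,
      // and the call returns without waiting for the compaction to finish.
      admin.compact(table, Bytes.toBytes("A"));

      // A major compaction request would rewrite every store file of the table.
      admin.majorCompact(table);
    }
  }
}

The "16 blocking" in the selection lines appears to correspond to the store-file limit hbase.hstore.blockingStoreFiles (default 16), beyond which further flushes are delayed, while the minimum file count for a minor compaction is controlled by hbase.hstore.compactionThreshold.
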
2024-12-10T14:28:17,549 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/a89d5b4dfbfd41a4b8325e1567f6a23b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/7ac6085a8fbe42488fb482e44fff1b6a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/fe3bdd2e44094855b5f415b4c6ee4bb7] into tmpdir=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp, totalSize=31.9 K 2024-12-10T14:28:17,549 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting a89d5b4dfbfd41a4b8325e1567f6a23b, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733840891573 2024-12-10T14:28:17,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742417_1593 (size=32005) 2024-12-10T14:28:17,549 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ac6085a8fbe42488fb482e44fff1b6a, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1733840892186 2024-12-10T14:28:17,550 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] compactions.Compactor(224): Compacting fe3bdd2e44094855b5f415b4c6ee4bb7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1733840893371 2024-12-10T14:28:17,555 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a09038ab689acaef0b961036bc4b4bd2#C#compaction#491 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T14:28:17,555 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/ec7e3927fd154e11ade2b5c2be14adb6 is 50, key is test_row_0/C:col10/1733840893373/Put/seqid=0 2024-12-10T14:28:17,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742418_1594 (size=13051) 2024-12-10T14:28:17,953 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/4471de3bf8f648f6a22419fff22bb3df as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/4471de3bf8f648f6a22419fff22bb3df 2024-12-10T14:28:17,957 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/A of a09038ab689acaef0b961036bc4b4bd2 into 4471de3bf8f648f6a22419fff22bb3df(size=31.3 K), total size for store is 31.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:28:17,957 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:17,957 INFO [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/A, priority=13, startTime=1733840897131; duration=0sec 2024-12-10T14:28:17,957 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:17,957 DEBUG [RS:0;db1d50717577:46699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:A 2024-12-10T14:28:17,960 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/ec7e3927fd154e11ade2b5c2be14adb6 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/ec7e3927fd154e11ade2b5c2be14adb6 2024-12-10T14:28:17,963 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a09038ab689acaef0b961036bc4b4bd2/C of a09038ab689acaef0b961036bc4b4bd2 into ec7e3927fd154e11ade2b5c2be14adb6(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T14:28:17,963 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:17,963 INFO [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2., storeName=a09038ab689acaef0b961036bc4b4bd2/C, priority=13, startTime=1733840897131; duration=0sec 2024-12-10T14:28:17,963 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T14:28:17,963 DEBUG [RS:0;db1d50717577:46699-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a09038ab689acaef0b961036bc4b4bd2:C 2024-12-10T14:28:19,721 DEBUG [Thread-2250 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f1754bc to 127.0.0.1:58494 2024-12-10T14:28:19,721 DEBUG [Thread-2250 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:28:26,523 DEBUG [Thread-2248 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7846cb78 to 127.0.0.1:58494 2024-12-10T14:28:26,523 DEBUG [Thread-2248 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:28:26,523 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-10T14:28:26,523 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-12-10T14:28:26,523 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-12-10T14:28:26,523 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43 2024-12-10T14:28:26,523 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-12-10T14:28:26,523 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-12-10T14:28:26,523 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-10T14:28:26,523 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8218 2024-12-10T14:28:26,523 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7512 2024-12-10T14:28:26,524 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7793 2024-12-10T14:28:26,524 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8203 2024-12-10T14:28:26,524 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7505 2024-12-10T14:28:26,524 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-10T14:28:26,524 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T14:28:26,524 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11030ef5 to 127.0.0.1:58494 2024-12-10T14:28:26,524 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:28:26,524 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-10T14:28:26,524 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-10T14:28:26,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-10T14:28:26,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T14:28:26,527 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840906526"}]},"ts":"1733840906526"} 2024-12-10T14:28:26,527 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-10T14:28:26,531 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-10T14:28:26,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T14:28:26,532 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a09038ab689acaef0b961036bc4b4bd2, UNASSIGN}] 2024-12-10T14:28:26,532 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=168, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a09038ab689acaef0b961036bc4b4bd2, UNASSIGN 2024-12-10T14:28:26,533 INFO [PEWorker-2 
{}] assignment.RegionStateStore(202): pid=169 updating hbase:meta row=a09038ab689acaef0b961036bc4b4bd2, regionState=CLOSING, regionLocation=db1d50717577,46699,1733840717757 2024-12-10T14:28:26,533 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T14:28:26,533 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; CloseRegionProcedure a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757}] 2024-12-10T14:28:26,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T14:28:26,684 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db1d50717577,46699,1733840717757 2024-12-10T14:28:26,685 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(124): Close a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:26,685 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T14:28:26,685 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1681): Closing a09038ab689acaef0b961036bc4b4bd2, disabling compactions & flushes 2024-12-10T14:28:26,685 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:26,685 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 2024-12-10T14:28:26,685 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. after waiting 0 ms 2024-12-10T14:28:26,685 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
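
The procedure chain above (pid=167 DisableTableProcedure, pid=168 CloseTableRegionsProcedure, pid=169 TransitRegionStateProcedure, pid=170 CloseRegionProcedure) is the master tearing the table down after the tool reported its writer and reader counts. For reference only, a minimal client-side sketch of disabling (and, if desired, dropping) the table is given below; it assumes a reachable cluster, reuses the table name from the log, and uses an illustrative class name.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {                       // illustrative class name
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table) && admin.isTableEnabled(table)) {
        // Blocks until the master's DisableTableProcedure and the region-close
        // procedures it spawns (as logged above) have completed.
        admin.disableTable(table);
      }
      if (admin.tableExists(table) && admin.isTableDisabled(table)) {
        // Only a disabled table can be dropped.
        admin.deleteTable(table);
      }
    }
  }
}

The repeated "Checking to see if procedure is done pid=167" entries in the log appear to be exactly that wait: the admin client polling the master over RPC until the disable procedure finishes.
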
2024-12-10T14:28:26,685 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(2837): Flushing a09038ab689acaef0b961036bc4b4bd2 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-10T14:28:26,685 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=A 2024-12-10T14:28:26,685 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:26,685 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=B 2024-12-10T14:28:26,685 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:26,685 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a09038ab689acaef0b961036bc4b4bd2, store=C 2024-12-10T14:28:26,685 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T14:28:26,690 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210459ea596bcc74afd89603478cf5ead51_a09038ab689acaef0b961036bc4b4bd2 is 50, key is test_row_0/A:col10/1733840895486/Put/seqid=0 2024-12-10T14:28:26,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742419_1595 (size=12454) 2024-12-10T14:28:26,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T14:28:27,093 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T14:28:27,096 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210459ea596bcc74afd89603478cf5ead51_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210459ea596bcc74afd89603478cf5ead51_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:27,097 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/eab3c0a0200e40e0a8dd5fc4d9454779, store: [table=TestAcidGuarantees family=A region=a09038ab689acaef0b961036bc4b4bd2] 2024-12-10T14:28:27,097 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/eab3c0a0200e40e0a8dd5fc4d9454779 is 175, key is test_row_0/A:col10/1733840895486/Put/seqid=0 2024-12-10T14:28:27,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742420_1596 (size=31255) 2024-12-10T14:28:27,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T14:28:27,501 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=340, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/eab3c0a0200e40e0a8dd5fc4d9454779 2024-12-10T14:28:27,505 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/826b838c28d44dc09638d7e2042a60d4 is 50, key is test_row_0/B:col10/1733840895486/Put/seqid=0 2024-12-10T14:28:27,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742421_1597 (size=12301) 2024-12-10T14:28:27,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T14:28:27,909 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/826b838c28d44dc09638d7e2042a60d4 2024-12-10T14:28:27,914 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/ee96bffe33754372bfb48090c9460494 is 50, key is test_row_0/C:col10/1733840895486/Put/seqid=0 2024-12-10T14:28:27,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742422_1598 (size=12301) 2024-12-10T14:28:28,317 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=340 (bloomFilter=true), 
to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/ee96bffe33754372bfb48090c9460494 2024-12-10T14:28:28,320 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/A/eab3c0a0200e40e0a8dd5fc4d9454779 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/eab3c0a0200e40e0a8dd5fc4d9454779 2024-12-10T14:28:28,322 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/eab3c0a0200e40e0a8dd5fc4d9454779, entries=150, sequenceid=340, filesize=30.5 K 2024-12-10T14:28:28,323 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/B/826b838c28d44dc09638d7e2042a60d4 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/826b838c28d44dc09638d7e2042a60d4 2024-12-10T14:28:28,325 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/826b838c28d44dc09638d7e2042a60d4, entries=150, sequenceid=340, filesize=12.0 K 2024-12-10T14:28:28,326 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/.tmp/C/ee96bffe33754372bfb48090c9460494 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/ee96bffe33754372bfb48090c9460494 2024-12-10T14:28:28,328 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/ee96bffe33754372bfb48090c9460494, entries=150, sequenceid=340, filesize=12.0 K 2024-12-10T14:28:28,328 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for a09038ab689acaef0b961036bc4b4bd2 in 1643ms, sequenceid=340, compaction requested=false 2024-12-10T14:28:28,329 DEBUG [StoreCloser-TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/bb9a00276564451aa08d1271cb5a31cb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/ee72c5ea2b5b434085b3a4bc508fd480, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/71bfceb9127f47ca8e0c4a1047543243, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b4d637d0c00042e9995419aa5373eabf, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b82efe5f5cf24d0e8fb8974b244034de, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/536de0d9d2a747a3b1182504021910a1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b9f049599d72489fa35576752b0d715c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/755b929230b54dbea4af97a698c3e7bf, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b18a1135bbb2450983cb866aa6cc4fb2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b62fd3d8ca2b4976b8aa487e51d8be6c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/0f061ca421c04d8ea807310158e02d52, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/1e93262bff8d43a5944fd0f93f3262ab, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/6f574b5d1b4b431694fca765fe5d623d, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/6b57bda8ca9f492fa019a19288470c0a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/712402941a5f452380b554d7950cdab1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/086a8f49d30c407eb6e07e23c3311d7b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/cb4b1e54de63438c9136d14608257edc, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/e067011807ca49cd99d7c8bc3cb72e8a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/25538ca0d0ba481897e9f563a01e72d7, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/932640c8710e40ae8a60527d3ed31865, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/09d56eb1f3b24d59aee000d199c33001, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/85eb0d93866f4083b8c9b0d7cb5c04ba] to archive 2024-12-10T14:28:28,330 DEBUG [StoreCloser-TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T14:28:28,332 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/ee72c5ea2b5b434085b3a4bc508fd480 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/ee72c5ea2b5b434085b3a4bc508fd480 2024-12-10T14:28:28,332 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/bb9a00276564451aa08d1271cb5a31cb to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/bb9a00276564451aa08d1271cb5a31cb 2024-12-10T14:28:28,332 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/71bfceb9127f47ca8e0c4a1047543243 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/71bfceb9127f47ca8e0c4a1047543243 2024-12-10T14:28:28,332 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b82efe5f5cf24d0e8fb8974b244034de to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b82efe5f5cf24d0e8fb8974b244034de 2024-12-10T14:28:28,334 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b4d637d0c00042e9995419aa5373eabf to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b4d637d0c00042e9995419aa5373eabf 2024-12-10T14:28:28,334 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/536de0d9d2a747a3b1182504021910a1 to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/536de0d9d2a747a3b1182504021910a1 2024-12-10T14:28:28,334 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/1e93262bff8d43a5944fd0f93f3262ab to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/1e93262bff8d43a5944fd0f93f3262ab 2024-12-10T14:28:28,334 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/0f061ca421c04d8ea807310158e02d52 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/0f061ca421c04d8ea807310158e02d52 2024-12-10T14:28:28,335 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/755b929230b54dbea4af97a698c3e7bf to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/755b929230b54dbea4af97a698c3e7bf 2024-12-10T14:28:28,335 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b62fd3d8ca2b4976b8aa487e51d8be6c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b62fd3d8ca2b4976b8aa487e51d8be6c 2024-12-10T14:28:28,335 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b9f049599d72489fa35576752b0d715c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b9f049599d72489fa35576752b0d715c 2024-12-10T14:28:28,335 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b18a1135bbb2450983cb866aa6cc4fb2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/b18a1135bbb2450983cb866aa6cc4fb2 2024-12-10T14:28:28,335 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/6f574b5d1b4b431694fca765fe5d623d to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/6f574b5d1b4b431694fca765fe5d623d 2024-12-10T14:28:28,336 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/086a8f49d30c407eb6e07e23c3311d7b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/086a8f49d30c407eb6e07e23c3311d7b 2024-12-10T14:28:28,336 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/cb4b1e54de63438c9136d14608257edc to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/cb4b1e54de63438c9136d14608257edc 2024-12-10T14:28:28,336 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/e067011807ca49cd99d7c8bc3cb72e8a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/e067011807ca49cd99d7c8bc3cb72e8a 2024-12-10T14:28:28,336 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/932640c8710e40ae8a60527d3ed31865 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/932640c8710e40ae8a60527d3ed31865 2024-12-10T14:28:28,336 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/25538ca0d0ba481897e9f563a01e72d7 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/25538ca0d0ba481897e9f563a01e72d7 2024-12-10T14:28:28,336 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/712402941a5f452380b554d7950cdab1 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/712402941a5f452380b554d7950cdab1 2024-12-10T14:28:28,336 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/6b57bda8ca9f492fa019a19288470c0a to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/6b57bda8ca9f492fa019a19288470c0a 2024-12-10T14:28:28,336 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/09d56eb1f3b24d59aee000d199c33001 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/09d56eb1f3b24d59aee000d199c33001 2024-12-10T14:28:28,337 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/85eb0d93866f4083b8c9b0d7cb5c04ba to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/85eb0d93866f4083b8c9b0d7cb5c04ba 2024-12-10T14:28:28,338 DEBUG [StoreCloser-TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/0f18e3eae83545039b87bf0555a3fdb0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/ab90d0f78fb24d959fb485ad040d6fb8, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/f0d7351584b3468dbae2d039233aea18, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/352c8f54b2f9475aab0ff10ea6cfc671, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/7f137a1ed808429397bf4c0f5bc576ff, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/8491e473a2ac47c1ab117e444537b332, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/811391b31ac749f89d0e7dfcb0f363d7, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/a7ff227d438b4d898e4f49ea6d1409c2, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/54fdfcf264da4d44bfb067cefefe1bf4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/232a7847382b4f4cb3731947c5699ddb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/e2a0d9b7fdda4235b8c55319cf236007, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/ffb617eb03454b628f0b8e0797098499, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/c634d28ae1444e1cae09666fc8db30cb, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/a1b9288697b44a8daf6d7621623efd5c, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/e998880baaea4d0db948c43b840bca3a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/cac20f87e1d743868f678d5f3683f31f, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/1305fdfc32024c83b8824fc56d03be9a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/83f9a6e217ca4f1aa55a4b2f987781c0, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/cd8fafe37dc443cab5e3eff2d12f7af4, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/fb40a6464356464abb31d9fc8443031b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/cb7375de3a114c9f8e9625f7c9ffc638, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/5e6efa5940014633b8db4c0d4bcb4ef6] to archive 2024-12-10T14:28:28,339 DEBUG [StoreCloser-TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
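Editor's note on the HFileArchiver entries above and below: each compacted store file is moved out of the region's data directory into the parallel archive/ tree, preserving the data/<namespace>/<table>/<region>/<family>/<file> layout. Purely as an illustration of that pattern (a minimal sketch, not HBase's actual HFileArchiver implementation; the helper name and the root-relative path handling are assumptions), the move corresponds roughly to a rename on the underlying Hadoop FileSystem:

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ArchiveSketch {
        // Illustrative only: move .../data/<ns>/<table>/<region>/<cf>/<file>
        // to .../archive/data/<ns>/<table>/<region>/<cf>/<file>, mirroring the
        // source/destination pairs logged above.
        static boolean archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile)
                throws IOException {
            String root = rootDir.toUri().getPath();
            String file = storeFile.toUri().getPath();
            // Path of the store file relative to the cluster root directory,
            // e.g. data/default/TestAcidGuarantees/<region>/A/<file>.
            String relative = file.substring(root.length() + 1);
            Path archived = new Path(new Path(rootDir, "archive"), relative);
            fs.mkdirs(archived.getParent());        // ensure archive/<...>/<cf> exists
            return fs.rename(storeFile, archived);  // the real archiver additionally
                                                    // handles name collisions and retries
        }
    }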
2024-12-10T14:28:28,340 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/f0d7351584b3468dbae2d039233aea18 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/f0d7351584b3468dbae2d039233aea18 2024-12-10T14:28:28,340 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/352c8f54b2f9475aab0ff10ea6cfc671 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/352c8f54b2f9475aab0ff10ea6cfc671 2024-12-10T14:28:28,340 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/ab90d0f78fb24d959fb485ad040d6fb8 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/ab90d0f78fb24d959fb485ad040d6fb8 2024-12-10T14:28:28,341 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/8491e473a2ac47c1ab117e444537b332 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/8491e473a2ac47c1ab117e444537b332 2024-12-10T14:28:28,341 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/0f18e3eae83545039b87bf0555a3fdb0 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/0f18e3eae83545039b87bf0555a3fdb0 2024-12-10T14:28:28,341 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/811391b31ac749f89d0e7dfcb0f363d7 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/811391b31ac749f89d0e7dfcb0f363d7 2024-12-10T14:28:28,341 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/a7ff227d438b4d898e4f49ea6d1409c2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/a7ff227d438b4d898e4f49ea6d1409c2 2024-12-10T14:28:28,341 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/7f137a1ed808429397bf4c0f5bc576ff to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/7f137a1ed808429397bf4c0f5bc576ff 2024-12-10T14:28:28,342 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/54fdfcf264da4d44bfb067cefefe1bf4 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/54fdfcf264da4d44bfb067cefefe1bf4 2024-12-10T14:28:28,342 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/ffb617eb03454b628f0b8e0797098499 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/ffb617eb03454b628f0b8e0797098499 2024-12-10T14:28:28,342 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/232a7847382b4f4cb3731947c5699ddb to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/232a7847382b4f4cb3731947c5699ddb 2024-12-10T14:28:28,342 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/e2a0d9b7fdda4235b8c55319cf236007 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/e2a0d9b7fdda4235b8c55319cf236007 2024-12-10T14:28:28,342 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/a1b9288697b44a8daf6d7621623efd5c to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/a1b9288697b44a8daf6d7621623efd5c 2024-12-10T14:28:28,342 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/e998880baaea4d0db948c43b840bca3a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/e998880baaea4d0db948c43b840bca3a 2024-12-10T14:28:28,343 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/c634d28ae1444e1cae09666fc8db30cb to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/c634d28ae1444e1cae09666fc8db30cb 2024-12-10T14:28:28,343 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/cac20f87e1d743868f678d5f3683f31f to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/cac20f87e1d743868f678d5f3683f31f 2024-12-10T14:28:28,343 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/1305fdfc32024c83b8824fc56d03be9a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/1305fdfc32024c83b8824fc56d03be9a 2024-12-10T14:28:28,343 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/cb7375de3a114c9f8e9625f7c9ffc638 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/cb7375de3a114c9f8e9625f7c9ffc638 2024-12-10T14:28:28,343 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/cd8fafe37dc443cab5e3eff2d12f7af4 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/cd8fafe37dc443cab5e3eff2d12f7af4 2024-12-10T14:28:28,344 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/83f9a6e217ca4f1aa55a4b2f987781c0 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/83f9a6e217ca4f1aa55a4b2f987781c0 2024-12-10T14:28:28,344 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/5e6efa5940014633b8db4c0d4bcb4ef6 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/5e6efa5940014633b8db4c0d4bcb4ef6 2024-12-10T14:28:28,344 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/fb40a6464356464abb31d9fc8443031b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/fb40a6464356464abb31d9fc8443031b 2024-12-10T14:28:28,345 DEBUG [StoreCloser-TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/fdb343bca55543578511d134b556ceb3, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/63d014170c1b4c689d7046c270565518, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/187d8195b32c42bc80d08cc0f19b2e08, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/4fe77c5aa9224404bd1fd488f115497a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/8a67d5f169e742c086103ee492dd5c66, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/65b1ae12c3a24e14863c0827999d7a25, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/ea314c37c89b4503b48d6c19082c25c1, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/86e4bbfab84a4b19a68db17807b02b3b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/f6b194039010414ba4f7d763ad4fae23, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/a0a959aefdd340d5a9d39d0ba1ce93cf, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/e4dea5d27cac4adf8941969f269d0284, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/21bcb865ac1c44aaa9946e89eaa3c79a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/3cc8e1e7e51d43918e59e16f5e6ec020, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/231a5d9747724fb2928ed319ba60056a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/c46aa2f469c149f5bda4426eef3770f6, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/6d1ca66ecd4c4c1e8ad8f5a55bf2e1fe, 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/a4cc614f3a0a436fbfc348c52ee646be, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/b04f710231dc456c8f3b663524d8f464, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/a89d5b4dfbfd41a4b8325e1567f6a23b, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/653845ab976446a3926d5ffe4ba24f90, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/7ac6085a8fbe42488fb482e44fff1b6a, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/fe3bdd2e44094855b5f415b4c6ee4bb7] to archive 2024-12-10T14:28:28,345 DEBUG [StoreCloser-TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T14:28:28,347 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/fdb343bca55543578511d134b556ceb3 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/fdb343bca55543578511d134b556ceb3 2024-12-10T14:28:28,347 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/4fe77c5aa9224404bd1fd488f115497a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/4fe77c5aa9224404bd1fd488f115497a 2024-12-10T14:28:28,347 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/65b1ae12c3a24e14863c0827999d7a25 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/65b1ae12c3a24e14863c0827999d7a25 2024-12-10T14:28:28,347 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/187d8195b32c42bc80d08cc0f19b2e08 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/187d8195b32c42bc80d08cc0f19b2e08 2024-12-10T14:28:28,347 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/ea314c37c89b4503b48d6c19082c25c1 to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/ea314c37c89b4503b48d6c19082c25c1 2024-12-10T14:28:28,348 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/63d014170c1b4c689d7046c270565518 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/63d014170c1b4c689d7046c270565518 2024-12-10T14:28:28,348 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/86e4bbfab84a4b19a68db17807b02b3b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/86e4bbfab84a4b19a68db17807b02b3b 2024-12-10T14:28:28,348 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/8a67d5f169e742c086103ee492dd5c66 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/8a67d5f169e742c086103ee492dd5c66 2024-12-10T14:28:28,349 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/21bcb865ac1c44aaa9946e89eaa3c79a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/21bcb865ac1c44aaa9946e89eaa3c79a 2024-12-10T14:28:28,349 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/a0a959aefdd340d5a9d39d0ba1ce93cf to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/a0a959aefdd340d5a9d39d0ba1ce93cf 2024-12-10T14:28:28,349 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/f6b194039010414ba4f7d763ad4fae23 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/f6b194039010414ba4f7d763ad4fae23 2024-12-10T14:28:28,349 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/231a5d9747724fb2928ed319ba60056a to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/231a5d9747724fb2928ed319ba60056a 2024-12-10T14:28:28,349 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/e4dea5d27cac4adf8941969f269d0284 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/e4dea5d27cac4adf8941969f269d0284 2024-12-10T14:28:28,349 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/3cc8e1e7e51d43918e59e16f5e6ec020 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/3cc8e1e7e51d43918e59e16f5e6ec020 2024-12-10T14:28:28,350 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/c46aa2f469c149f5bda4426eef3770f6 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/c46aa2f469c149f5bda4426eef3770f6 2024-12-10T14:28:28,350 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/6d1ca66ecd4c4c1e8ad8f5a55bf2e1fe to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/6d1ca66ecd4c4c1e8ad8f5a55bf2e1fe 2024-12-10T14:28:28,350 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/a4cc614f3a0a436fbfc348c52ee646be to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/a4cc614f3a0a436fbfc348c52ee646be 2024-12-10T14:28:28,351 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/a89d5b4dfbfd41a4b8325e1567f6a23b to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/a89d5b4dfbfd41a4b8325e1567f6a23b 2024-12-10T14:28:28,351 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/fe3bdd2e44094855b5f415b4c6ee4bb7 to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/fe3bdd2e44094855b5f415b4c6ee4bb7 2024-12-10T14:28:28,351 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/b04f710231dc456c8f3b663524d8f464 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/b04f710231dc456c8f3b663524d8f464 2024-12-10T14:28:28,351 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/7ac6085a8fbe42488fb482e44fff1b6a to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/7ac6085a8fbe42488fb482e44fff1b6a 2024-12-10T14:28:28,351 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/653845ab976446a3926d5ffe4ba24f90 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/653845ab976446a3926d5ffe4ba24f90 2024-12-10T14:28:28,354 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/recovered.edits/343.seqid, newMaxSeqId=343, maxSeqId=4 2024-12-10T14:28:28,355 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2. 
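Editor's note on the flush recorded at the start of this excerpt (~26.84 KB across the A, B and C families, sequenceid=340): that flush is driven internally by the region close, not by a client call. A test or client wanting the same effect explicitly could request it through the Admin API; a minimal sketch, with the connection setup assumed and only the table name taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class FlushSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask the region server(s) to flush the table's memstores; the
                // resulting HFiles land under each store directory, as in the
                // "Added hdfs://..." entries above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }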
2024-12-10T14:28:28,355 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1635): Region close journal for a09038ab689acaef0b961036bc4b4bd2: 2024-12-10T14:28:28,356 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(170): Closed a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,356 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=169 updating hbase:meta row=a09038ab689acaef0b961036bc4b4bd2, regionState=CLOSED 2024-12-10T14:28:28,358 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-12-10T14:28:28,358 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; CloseRegionProcedure a09038ab689acaef0b961036bc4b4bd2, server=db1d50717577,46699,1733840717757 in 1.8240 sec 2024-12-10T14:28:28,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=168 2024-12-10T14:28:28,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=168, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a09038ab689acaef0b961036bc4b4bd2, UNASSIGN in 1.8260 sec 2024-12-10T14:28:28,360 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-10T14:28:28,360 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8280 sec 2024-12-10T14:28:28,361 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733840908361"}]},"ts":"1733840908361"} 2024-12-10T14:28:28,361 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T14:28:28,363 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-10T14:28:28,364 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8390 sec 2024-12-10T14:28:28,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T14:28:28,630 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-12-10T14:28:28,630 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T14:28:28,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:28:28,631 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=171, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:28:28,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-10T14:28:28,631 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=171, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:28:28,633 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,635 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C, FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/recovered.edits] 2024-12-10T14:28:28,637 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/4471de3bf8f648f6a22419fff22bb3df to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/4471de3bf8f648f6a22419fff22bb3df 2024-12-10T14:28:28,637 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/eab3c0a0200e40e0a8dd5fc4d9454779 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/A/eab3c0a0200e40e0a8dd5fc4d9454779 2024-12-10T14:28:28,639 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/826b838c28d44dc09638d7e2042a60d4 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/826b838c28d44dc09638d7e2042a60d4 2024-12-10T14:28:28,639 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/e41dda5af917497a9c2ee49f472c92c6 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/B/e41dda5af917497a9c2ee49f472c92c6 2024-12-10T14:28:28,641 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/ee96bffe33754372bfb48090c9460494 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/ee96bffe33754372bfb48090c9460494 
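Editor's note on the disable-then-delete sequence seen here (the DISABLE operation completing for procId 167, the client request that stores DeleteTableProcedure pid=171, and the archiving of the region's remaining A/B/C/recovered.edits files): on the client side this is what the standard Admin calls produce. A minimal sketch of that client-side pair, with the connection setup assumed and the table name taken from the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class DropTableSketch {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn =
                     ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // A table must be disabled before it can be deleted; each call
                // blocks until the corresponding master procedure finishes
                // (the "Checking to see if procedure is done" polling above).
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table);
                }
                admin.deleteTable(table);
            }
        }
    }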
2024-12-10T14:28:28,641 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/ec7e3927fd154e11ade2b5c2be14adb6 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/C/ec7e3927fd154e11ade2b5c2be14adb6 2024-12-10T14:28:28,643 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/recovered.edits/343.seqid to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2/recovered.edits/343.seqid 2024-12-10T14:28:28,643 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/default/TestAcidGuarantees/a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,643 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T14:28:28,643 DEBUG [PEWorker-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T14:28:28,644 DEBUG [PEWorker-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-10T14:28:28,649 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121004c8744446744847b25a25e174fe4d09_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121004c8744446744847b25a25e174fe4d09_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,649 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121016a4e9e7553a4e2e8415740ad06289b1_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121016a4e9e7553a4e2e8415740ad06289b1_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,649 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121006e65019e55d433cbc0b895c4bbdf5c7_a09038ab689acaef0b961036bc4b4bd2 to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121006e65019e55d433cbc0b895c4bbdf5c7_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,649 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210211bba89a21c416a853632a9009cbf59_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210211bba89a21c416a853632a9009cbf59_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,649 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102311bcb8485a4a25a237b2f4734b1011_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102311bcb8485a4a25a237b2f4734b1011_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,649 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210459ea596bcc74afd89603478cf5ead51_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210459ea596bcc74afd89603478cf5ead51_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,649 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121038f08b53078a4e789a1fb3e8d1a10e44_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121038f08b53078a4e789a1fb3e8d1a10e44_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,649 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210209eeb1bfa4649b7819be9ed315e7fdc_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210209eeb1bfa4649b7819be9ed315e7fdc_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,650 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412107458cf839cc3443aa88a09aa457887e0_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412107458cf839cc3443aa88a09aa457887e0_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,650 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ab881d7ac68a47e6a6768314f2228171_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ab881d7ac68a47e6a6768314f2228171_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,650 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121095d5b01728174e8eb014aaad16ef6c62_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121095d5b01728174e8eb014aaad16ef6c62_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,650 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c87ea92e75314d3e9c117e0d1e710fc5_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c87ea92e75314d3e9c117e0d1e710fc5_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,650 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121083feb94eae294094a7504e50c15bea43_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121083feb94eae294094a7504e50c15bea43_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,650 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210bbec03b9aee944ae8719633f3d80fdc2_a09038ab689acaef0b961036bc4b4bd2 to 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210bbec03b9aee944ae8719633f3d80fdc2_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,650 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210dd4e94f5217f44df9411cf26dc447d8c_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210dd4e94f5217f44df9411cf26dc447d8c_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,651 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210dd73882794874d80a2dc93f23a8f3102_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210dd73882794874d80a2dc93f23a8f3102_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,651 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f2550246b97e4598ba0bf499f9e6f085_a09038ab689acaef0b961036bc4b4bd2 to hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f2550246b97e4598ba0bf499f9e6f085_a09038ab689acaef0b961036bc4b4bd2 2024-12-10T14:28:28,652 DEBUG [PEWorker-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T14:28:28,653 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=171, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:28:28,655 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T14:28:28,657 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-10T14:28:28,658 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=171, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:28:28,658 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
2024-12-10T14:28:28,658 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733840908658"}]},"ts":"9223372036854775807"} 2024-12-10T14:28:28,659 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T14:28:28,659 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => a09038ab689acaef0b961036bc4b4bd2, NAME => 'TestAcidGuarantees,,1733840870359.a09038ab689acaef0b961036bc4b4bd2.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T14:28:28,659 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-10T14:28:28,659 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733840908659"}]},"ts":"9223372036854775807"} 2024-12-10T14:28:28,660 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T14:28:28,662 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=171, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T14:28:28,663 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 32 msec 2024-12-10T14:28:28,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33823 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-10T14:28:28,732 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-12-10T14:28:28,741 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=244 (was 244), OpenFileDescriptor=449 (was 450), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=258 (was 303), ProcessCount=11 (was 11), AvailableMemoryMB=2224 (was 2276) 2024-12-10T14:28:28,741 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-10T14:28:28,741 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T14:28:28,741 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x76523d14 to 127.0.0.1:58494 2024-12-10T14:28:28,742 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:28:28,742 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-10T14:28:28,742 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1531552996, stopped=false 2024-12-10T14:28:28,742 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=db1d50717577,33823,1733840717045 2024-12-10T14:28:28,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T14:28:28,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/running 2024-12-10T14:28:28,744 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-10T14:28:28,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:28:28,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:28:28,744 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T14:28:28,744 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T14:28:28,744 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:28:28,744 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'db1d50717577,46699,1733840717757' ***** 2024-12-10T14:28:28,745 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-10T14:28:28,745 INFO [RS:0;db1d50717577:46699 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T14:28:28,745 INFO [RS:0;db1d50717577:46699 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T14:28:28,745 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-10T14:28:28,745 INFO [RS:0;db1d50717577:46699 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-10T14:28:28,745 INFO [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(3579): Received CLOSE for d677ce41b1f947badc4a07f8de4e4b16 2024-12-10T14:28:28,745 INFO [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1224): stopping server db1d50717577,46699,1733840717757 2024-12-10T14:28:28,745 DEBUG [RS:0;db1d50717577:46699 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:28:28,745 INFO [RS:0;db1d50717577:46699 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T14:28:28,746 INFO [RS:0;db1d50717577:46699 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T14:28:28,746 INFO [RS:0;db1d50717577:46699 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T14:28:28,746 INFO [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-10T14:28:28,746 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing d677ce41b1f947badc4a07f8de4e4b16, disabling compactions & flushes 2024-12-10T14:28:28,746 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16. 
2024-12-10T14:28:28,746 INFO [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-10T14:28:28,746 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16. 2024-12-10T14:28:28,746 DEBUG [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1603): Online Regions={d677ce41b1f947badc4a07f8de4e4b16=hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16., 1588230740=hbase:meta,,1.1588230740} 2024-12-10T14:28:28,746 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16. after waiting 0 ms 2024-12-10T14:28:28,746 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16. 2024-12-10T14:28:28,746 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing d677ce41b1f947badc4a07f8de4e4b16 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-10T14:28:28,746 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-10T14:28:28,746 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-10T14:28:28,746 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-10T14:28:28,746 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T14:28:28,746 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T14:28:28,746 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-10T14:28:28,749 DEBUG [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, d677ce41b1f947badc4a07f8de4e4b16 2024-12-10T14:28:28,763 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/namespace/d677ce41b1f947badc4a07f8de4e4b16/.tmp/info/855bba09e83a474481c32be6aacda4a3 is 45, key is default/info:d/1733840722232/Put/seqid=0 2024-12-10T14:28:28,763 INFO [regionserver/db1d50717577:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T14:28:28,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742423_1599 (size=5037) 2024-12-10T14:28:28,766 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/namespace/d677ce41b1f947badc4a07f8de4e4b16/.tmp/info/855bba09e83a474481c32be6aacda4a3 2024-12-10T14:28:28,769 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/namespace/d677ce41b1f947badc4a07f8de4e4b16/.tmp/info/855bba09e83a474481c32be6aacda4a3 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/namespace/d677ce41b1f947badc4a07f8de4e4b16/info/855bba09e83a474481c32be6aacda4a3 2024-12-10T14:28:28,771 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/.tmp/info/7340a417a3a44001bdfe075af56eb728 is 143, key is hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16./info:regioninfo/1733840722109/Put/seqid=0 2024-12-10T14:28:28,772 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/namespace/d677ce41b1f947badc4a07f8de4e4b16/info/855bba09e83a474481c32be6aacda4a3, entries=2, sequenceid=6, filesize=4.9 K 2024-12-10T14:28:28,773 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for d677ce41b1f947badc4a07f8de4e4b16 in 27ms, sequenceid=6, compaction requested=false 2024-12-10T14:28:28,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742424_1600 (size=7725) 2024-12-10T14:28:28,778 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/namespace/d677ce41b1f947badc4a07f8de4e4b16/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T14:28:28,779 INFO [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16. 2024-12-10T14:28:28,779 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for d677ce41b1f947badc4a07f8de4e4b16: 2024-12-10T14:28:28,779 DEBUG [RS_CLOSE_REGION-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733840720882.d677ce41b1f947badc4a07f8de4e4b16. 
2024-12-10T14:28:28,949 DEBUG [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-10T14:28:29,150 DEBUG [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-10T14:28:29,176 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/.tmp/info/7340a417a3a44001bdfe075af56eb728 2024-12-10T14:28:29,193 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/.tmp/rep_barrier/9950928fa939428490e574d975c70a20 is 102, key is TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff./rep_barrier:/1733840748653/DeleteFamily/seqid=0 2024-12-10T14:28:29,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742425_1601 (size=6025) 2024-12-10T14:28:29,350 DEBUG [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-10T14:28:29,434 WARN [BootstrapNodeManager {}] regionserver.BootstrapNodeManager(140): failed to get live region servers from master org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=db1d50717577:33823 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) ~[hbase-client-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) ~[hbase-client-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) ~[hbase-client-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$200(AbstractRpcClient.java:94) ~[hbase-client-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.getLiveRegionServers(RegionServerStatusProtos.java:17362) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.client.ConnectionImplementation.getLiveRegionServers(ConnectionImplementation.java:2376) ~[hbase-client-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.BootstrapNodeManager.getFromMaster(BootstrapNodeManager.java:138) ~[classes/:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    ...
12 more 2024-12-10T14:28:29,550 DEBUG [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-10T14:28:29,568 INFO [regionserver/db1d50717577:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-10T14:28:29,568 INFO [regionserver/db1d50717577:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-10T14:28:29,597 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/.tmp/rep_barrier/9950928fa939428490e574d975c70a20 2024-12-10T14:28:29,614 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/.tmp/table/bf970731de0c4a99a57e326912e3217c is 96, key is TestAcidGuarantees,,1733840722394.3cb281b62d072b2e7312c326c99dffff./table:/1733840748653/DeleteFamily/seqid=0 2024-12-10T14:28:29,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742426_1602 (size=5942) 2024-12-10T14:28:29,750 INFO [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-10T14:28:29,750 DEBUG [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-10T14:28:29,750 DEBUG [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-10T14:28:29,951 DEBUG [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-10T14:28:30,017 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/.tmp/table/bf970731de0c4a99a57e326912e3217c 2024-12-10T14:28:30,020 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/.tmp/info/7340a417a3a44001bdfe075af56eb728 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/info/7340a417a3a44001bdfe075af56eb728 2024-12-10T14:28:30,023 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/info/7340a417a3a44001bdfe075af56eb728, entries=22, sequenceid=93, filesize=7.5 K 2024-12-10T14:28:30,023 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/.tmp/rep_barrier/9950928fa939428490e574d975c70a20 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/rep_barrier/9950928fa939428490e574d975c70a20 2024-12-10T14:28:30,026 INFO 
[RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/rep_barrier/9950928fa939428490e574d975c70a20, entries=6, sequenceid=93, filesize=5.9 K 2024-12-10T14:28:30,026 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/.tmp/table/bf970731de0c4a99a57e326912e3217c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/table/bf970731de0c4a99a57e326912e3217c 2024-12-10T14:28:30,028 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/table/bf970731de0c4a99a57e326912e3217c, entries=9, sequenceid=93, filesize=5.8 K 2024-12-10T14:28:30,029 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1283ms, sequenceid=93, compaction requested=false 2024-12-10T14:28:30,032 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-10T14:28:30,033 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T14:28:30,033 INFO [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-10T14:28:30,033 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-10T14:28:30,033 DEBUG [RS_CLOSE_META-regionserver/db1d50717577:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-10T14:28:30,151 INFO [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1250): stopping server db1d50717577,46699,1733840717757; all regions closed. 
2024-12-10T14:28:30,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741834_1010 (size=26050) 2024-12-10T14:28:30,156 DEBUG [RS:0;db1d50717577:46699 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/oldWALs 2024-12-10T14:28:30,156 INFO [RS:0;db1d50717577:46699 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL db1d50717577%2C46699%2C1733840717757.meta:.meta(num 1733840720637) 2024-12-10T14:28:30,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741832_1008 (size=13628841) 2024-12-10T14:28:30,160 DEBUG [RS:0;db1d50717577:46699 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/oldWALs 2024-12-10T14:28:30,160 INFO [RS:0;db1d50717577:46699 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL db1d50717577%2C46699%2C1733840717757:(num 1733840719719) 2024-12-10T14:28:30,160 DEBUG [RS:0;db1d50717577:46699 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:28:30,160 INFO [RS:0;db1d50717577:46699 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T14:28:30,160 INFO [RS:0;db1d50717577:46699 {}] hbase.ChoreService(370): Chore service for: regionserver/db1d50717577:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-10T14:28:30,160 INFO [regionserver/db1d50717577:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-10T14:28:30,161 INFO [RS:0;db1d50717577:46699 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46699 2024-12-10T14:28:30,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db1d50717577,46699,1733840717757 2024-12-10T14:28:30,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T14:28:30,167 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db1d50717577,46699,1733840717757] 2024-12-10T14:28:30,167 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing db1d50717577,46699,1733840717757; numProcessing=1 2024-12-10T14:28:30,168 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/db1d50717577,46699,1733840717757 already deleted, retry=false 2024-12-10T14:28:30,168 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; db1d50717577,46699,1733840717757 expired; onlineServers=0 2024-12-10T14:28:30,168 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'db1d50717577,33823,1733840717045' ***** 2024-12-10T14:28:30,168 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-10T14:28:30,168 DEBUG [M:0;db1d50717577:33823 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16ce5eb1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db1d50717577/172.17.0.2:0 2024-12-10T14:28:30,168 INFO [M:0;db1d50717577:33823 {}] regionserver.HRegionServer(1224): stopping server db1d50717577,33823,1733840717045 2024-12-10T14:28:30,168 INFO [M:0;db1d50717577:33823 {}] regionserver.HRegionServer(1250): stopping server db1d50717577,33823,1733840717045; all regions closed. 2024-12-10T14:28:30,168 DEBUG [M:0;db1d50717577:33823 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T14:28:30,168 DEBUG [M:0;db1d50717577:33823 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-10T14:28:30,169 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-10T14:28:30,169 DEBUG [M:0;db1d50717577:33823 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-10T14:28:30,169 DEBUG [master/db1d50717577:0:becomeActiveMaster-HFileCleaner.large.0-1733840719403 {}] cleaner.HFileCleaner(306): Exit Thread[master/db1d50717577:0:becomeActiveMaster-HFileCleaner.large.0-1733840719403,5,FailOnTimeoutGroup] 2024-12-10T14:28:30,169 DEBUG [master/db1d50717577:0:becomeActiveMaster-HFileCleaner.small.0-1733840719404 {}] cleaner.HFileCleaner(306): Exit Thread[master/db1d50717577:0:becomeActiveMaster-HFileCleaner.small.0-1733840719404,5,FailOnTimeoutGroup] 2024-12-10T14:28:30,169 INFO [M:0;db1d50717577:33823 {}] hbase.ChoreService(370): Chore service for: master/db1d50717577:0 had [] on shutdown 2024-12-10T14:28:30,169 DEBUG [M:0;db1d50717577:33823 {}] master.HMaster(1733): Stopping service threads 2024-12-10T14:28:30,169 INFO [M:0;db1d50717577:33823 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-10T14:28:30,169 ERROR [M:0;db1d50717577:33823 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (509200408) connection to localhost/127.0.0.1:38801 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:38801,5,PEWorkerGroup] 2024-12-10T14:28:30,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-10T14:28:30,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T14:28:30,170 INFO [M:0;db1d50717577:33823 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-10T14:28:30,170 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-10T14:28:30,170 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T14:28:30,170 DEBUG [M:0;db1d50717577:33823 {}] zookeeper.ZKUtil(347): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-10T14:28:30,170 WARN [M:0;db1d50717577:33823 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-10T14:28:30,170 INFO [M:0;db1d50717577:33823 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-10T14:28:30,170 INFO [M:0;db1d50717577:33823 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-10T14:28:30,171 DEBUG [M:0;db1d50717577:33823 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T14:28:30,171 INFO [M:0;db1d50717577:33823 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T14:28:30,171 DEBUG [M:0;db1d50717577:33823 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T14:28:30,171 DEBUG [M:0;db1d50717577:33823 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-10T14:28:30,171 DEBUG [M:0;db1d50717577:33823 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T14:28:30,171 INFO [M:0;db1d50717577:33823 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=742.92 KB heapSize=911.66 KB 2024-12-10T14:28:30,185 DEBUG [M:0;db1d50717577:33823 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5e19e303d8e3403fa0cf34a31c717afa is 82, key is hbase:meta,,1/info:regioninfo/1733840720773/Put/seqid=0 2024-12-10T14:28:30,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742427_1603 (size=5672) 2024-12-10T14:28:30,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T14:28:30,267 INFO [RS:0;db1d50717577:46699 {}] regionserver.HRegionServer(1307): Exiting; stopping=db1d50717577,46699,1733840717757; zookeeper connection closed. 
2024-12-10T14:28:30,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46699-0x1019cc3ac5f0001, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T14:28:30,267 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1d6a4d0c {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1d6a4d0c 2024-12-10T14:28:30,268 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-10T14:28:30,589 INFO [M:0;db1d50717577:33823 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2076 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5e19e303d8e3403fa0cf34a31c717afa 2024-12-10T14:28:30,609 DEBUG [M:0;db1d50717577:33823 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/34630b7b456a4a3faf5d9c8b3d19f42c is 2284, key is \x00\x00\x00\x00\x00\x00\x00\x94/proc:d/1733840873380/Put/seqid=0 2024-12-10T14:28:30,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742428_1604 (size=43718) 2024-12-10T14:28:31,013 INFO [M:0;db1d50717577:33823 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=742.36 KB at sequenceid=2076 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/34630b7b456a4a3faf5d9c8b3d19f42c 2024-12-10T14:28:31,016 INFO [M:0;db1d50717577:33823 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 34630b7b456a4a3faf5d9c8b3d19f42c 2024-12-10T14:28:31,031 DEBUG [M:0;db1d50717577:33823 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/10416a36ecb44deb839a33d5522162d6 is 69, key is db1d50717577,46699,1733840717757/rs:state/1733840719485/Put/seqid=0 2024-12-10T14:28:31,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073742429_1605 (size=5156) 2024-12-10T14:28:31,435 INFO [M:0;db1d50717577:33823 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2076 (bloomFilter=true), to=hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/10416a36ecb44deb839a33d5522162d6 2024-12-10T14:28:31,438 DEBUG [M:0;db1d50717577:33823 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5e19e303d8e3403fa0cf34a31c717afa as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5e19e303d8e3403fa0cf34a31c717afa 2024-12-10T14:28:31,440 INFO [M:0;db1d50717577:33823 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5e19e303d8e3403fa0cf34a31c717afa, entries=8, sequenceid=2076, filesize=5.5 K 2024-12-10T14:28:31,441 DEBUG [M:0;db1d50717577:33823 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/34630b7b456a4a3faf5d9c8b3d19f42c as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/34630b7b456a4a3faf5d9c8b3d19f42c 2024-12-10T14:28:31,443 INFO [M:0;db1d50717577:33823 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 34630b7b456a4a3faf5d9c8b3d19f42c 2024-12-10T14:28:31,443 INFO [M:0;db1d50717577:33823 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/34630b7b456a4a3faf5d9c8b3d19f42c, entries=171, sequenceid=2076, filesize=42.7 K 2024-12-10T14:28:31,444 DEBUG [M:0;db1d50717577:33823 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/10416a36ecb44deb839a33d5522162d6 as hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/10416a36ecb44deb839a33d5522162d6 2024-12-10T14:28:31,446 INFO [M:0;db1d50717577:33823 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38801/user/jenkins/test-data/0bbdbfff-2c54-b21e-9853-3419083d05da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/10416a36ecb44deb839a33d5522162d6, entries=1, sequenceid=2076, filesize=5.0 K 2024-12-10T14:28:31,446 INFO [M:0;db1d50717577:33823 {}] regionserver.HRegion(3040): Finished flush of dataSize ~742.92 KB/760745, heapSize ~911.36 KB/933232, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1275ms, sequenceid=2076, compaction requested=false 2024-12-10T14:28:31,447 INFO [M:0;db1d50717577:33823 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T14:28:31,448 DEBUG [M:0;db1d50717577:33823 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-10T14:28:31,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41075 is added to blk_1073741830_1006 (size=896892) 2024-12-10T14:28:31,450 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-10T14:28:31,450 INFO [M:0;db1d50717577:33823 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-10T14:28:31,450 INFO [M:0;db1d50717577:33823 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33823 2024-12-10T14:28:31,453 DEBUG [M:0;db1d50717577:33823 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/db1d50717577,33823,1733840717045 already deleted, retry=false 2024-12-10T14:28:31,554 INFO [M:0;db1d50717577:33823 {}] regionserver.HRegionServer(1307): Exiting; stopping=db1d50717577,33823,1733840717045; zookeeper connection closed. 
2024-12-10T14:28:31,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T14:28:31,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33823-0x1019cc3ac5f0000, quorum=127.0.0.1:58494, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-10T14:28:31,559 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3054265c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T14:28:31,562 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@65902fec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T14:28:31,562 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T14:28:31,562 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T14:28:31,562 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ca71a25{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/hadoop.log.dir/,STOPPED} 2024-12-10T14:28:31,565 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-10T14:28:31,565 WARN [BP-1903503158-172.17.0.2-1733840714192 heartbeating to localhost/127.0.0.1:38801 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-10T14:28:31,565 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-10T14:28:31,565 WARN [BP-1903503158-172.17.0.2-1733840714192 heartbeating to localhost/127.0.0.1:38801 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1903503158-172.17.0.2-1733840714192 (Datanode Uuid f005e3c9-5d83-41ba-9f33-c9bb2afc7f0c) service to localhost/127.0.0.1:38801 2024-12-10T14:28:31,567 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/cluster_f4b4a87d-946f-3b61-084c-969bf580dcf8/dfs/data/data1/current/BP-1903503158-172.17.0.2-1733840714192 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T14:28:31,567 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/cluster_f4b4a87d-946f-3b61-084c-969bf580dcf8/dfs/data/data2/current/BP-1903503158-172.17.0.2-1733840714192 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-10T14:28:31,568 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-10T14:28:31,575 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-10T14:28:31,575 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-10T14:28:31,576 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-10T14:28:31,576 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-10T14:28:31,576 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/bbde7802-b723-d646-a0ad-8a8dc18d3f49/hadoop.log.dir/,STOPPED} 2024-12-10T14:28:31,596 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-10T14:28:31,712 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
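For orientation only: the teardown sequence recorded above (delete of TestAcidGuarantees, region server and master stop, DataNode and ZooKeeper shutdown, ending in "Minicluster is down") is what HBase's test harness emits when a minicluster-based test class finishes. A minimal sketch of that lifecycle, assuming the public HBaseTestingUtility API from the hbase-testing-util module (the class and test names below are hypothetical and not taken from this log), might look like:

```java
// Illustrative sketch only; not the actual TestAcidGuaranteesWithAdaptivePolicy source.
// Only the HBase/JUnit APIs used (startMiniCluster, getAdmin, shutdownMiniCluster, ...)
// are real; the class and method names are placeholders.
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
  private static final TableName TABLE = TableName.valueOf("TestAcidGuarantees");

  @BeforeClass
  public static void setUpCluster() throws Exception {
    // One master and one region server, matching the single-RS run in this log.
    UTIL.startMiniCluster(1);
  }

  @Test
  public void testWorkload() throws Exception {
    // Table creation and the actual ACID workload would go here.
  }

  @AfterClass
  public static void tearDownCluster() throws Exception {
    Admin admin = UTIL.getAdmin();
    if (admin.tableExists(TABLE)) {
      // Disable + delete drives the DeleteTableProcedure and HFileArchiver
      // activity recorded earlier in this log.
      admin.disableTable(TABLE);
      admin.deleteTable(TABLE);
    }
    // Stops the region server, master, HDFS minicluster and ZooKeeper, logging
    // "Shutting down minicluster" and finally "Minicluster is down".
    UTIL.shutdownMiniCluster();
  }
}
```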